Example 1: interleaving MPI_Comm_create_group with a pending MPI_Comm_idup
#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int size, rank;
    MPI_Group world_group;
    MPI_Comm group_comm, idup_comm;
    MPI_Request req;
    MPI_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (size % 2) {
        fprintf(stderr, "this program requires an even number of processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Create some groups */
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

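    /* Even and odd ranks issue the blocking MPI_Comm_create_group and the
     * nonblocking MPI_Comm_idup in opposite orders; the two operations must
     * be able to interleave across ranks without deadlocking. */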
    if (rank % 2 == 0) {
        MPI_Comm_create_group(MPI_COMM_WORLD, world_group, 0, &group_comm);
        MPI_Comm_idup(MPI_COMM_WORLD, &idup_comm, &req);
    }
    else {
        MPI_Comm_idup(MPI_COMM_WORLD, &idup_comm, &req);
        MPI_Comm_create_group(MPI_COMM_WORLD, world_group, 0, &group_comm);
    }

    MPI_Wait(&req, MPI_STATUS_IGNORE);
    /* Test the new communicators with a barrier */
    MPI_Barrier(idup_comm);
    MPI_Barrier(group_comm);

    MPI_Group_free(&world_group);
    MPI_Comm_free(&idup_comm);
    MPI_Comm_free(&group_comm);
    if (rank == 0)
        printf(" No errors\n");

    MPI_Finalize();
    return 0;
}
Example 2: even ranks build their own communicator with MPI_Comm_create_group
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>

int main(int argc, char *argv[])
{
    int size, rank, i, *excl;
    MPI_Group world_group, even_group;
    MPI_Comm  __attribute__((unused)) even_comm;

    MPI_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (size % 2) {
        fprintf(stderr, "this program requires an even number of processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
        exit(1);
    }

    excl = malloc((size / 2) * sizeof(int));
    assert(excl);

    /* exclude the odd ranks */
    for (i = 0; i < size / 2; i++)
        excl[i] = (2 * i) + 1;

    /* Create some groups */
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Group_excl(world_group, size / 2, excl, &even_group);
    MPI_Group_free(&world_group);
    free(excl);

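    /* MPI_Comm_create_group is collective only over the processes in the
     * group argument, so only the even ranks need to make the call. */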
#if !defined(USE_STRICT_MPI) && defined(MPICH)
    if (rank % 2 == 0) {
        /* Even processes create a group for themselves */
        MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, &even_comm);
        MPI_Barrier(even_comm);
        MPI_Comm_free(&even_comm);
    }
#endif /* USE_STRICT_MPI */

    MPI_Group_free(&even_group);
    MPI_Barrier(MPI_COMM_WORLD);

    if (rank == 0)
        printf(" No errors\n");

    MPI_Finalize();
    return 0;
}
Example 3: a communicator of prime-numbered ranks via MPI_Group_incl and MPI_Comm_create_group
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv) {
	MPI_Init(NULL, NULL);

	// Get the rank and size in the original communicator
	int world_rank, world_size;
	MPI_Comm_rank(MPI_COMM_WORLD, &world_rank);
	MPI_Comm_size(MPI_COMM_WORLD, &world_size);

	// Get the group of processes in MPI_COMM_WORLD
	MPI_Group world_group;
	MPI_Comm_group(MPI_COMM_WORLD, &world_group);

	int n = 7;
	const int ranks[7] = {1, 2, 3, 5, 7, 11, 13};
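	// Note: including ranks up to 13 assumes the job was launched with at
	// least 14 processes; passing out-of-range ranks to MPI_Group_incl is erroneous.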

	// Construct a group containing all of the prime ranks in world_group
	MPI_Group prime_group;
	MPI_Group_incl(world_group, n, ranks, &prime_group);

	// Create a new communicator based on the group
	MPI_Comm prime_comm;
	MPI_Comm_create_group(MPI_COMM_WORLD, prime_group, 0, &prime_comm);

	int prime_rank = -1, prime_size = -1;
	// If this rank isn't in the new communicator, it will be MPI_COMM_NULL
	// Using MPI_COMM_NULL for MPI_Comm_rank or MPI_Comm_size is erroneous
	if (MPI_COMM_NULL != prime_comm) {
		MPI_Comm_rank(prime_comm, &prime_rank);
		MPI_Comm_size(prime_comm, &prime_size);
	}

	printf("WORLD RANK/SIZE: %d/%d \t PRIME RANK/SIZE: %d/%d\n",
		world_rank, world_size, prime_rank, prime_size);

	MPI_Group_free(&world_group);
	MPI_Group_free(&prime_group);
	if (MPI_COMM_NULL != prime_comm) {
		MPI_Comm_free(&prime_comm);
	}

	MPI_Finalize();
}
Example 4: overlapping MPI_Comm_idup with other communicator-creation functions
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include "mpitest.h"

/* Number of concurrent MPI_Comm_idup operations. The original test defines
 * this macro elsewhere; 4 is an assumed value. */
#ifndef NUM_IDUPS
#define NUM_IDUPS 4
#endif

int main(int argc, char **argv)
{
    int errs = 0;
    int i;
    int rank, size;
    int *excl;
    int ranges[1][3];
    int isLeft, rleader;
    MPI_Group world_group, high_group, even_group;
    MPI_Comm local_comm, inter_comm, test_comm, outcomm;
    MPI_Comm idupcomms[NUM_IDUPS];
    MPI_Request reqs[NUM_IDUPS];

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    if (size < 2) {
        printf("this test requires at least 2 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Idup MPI_COMM_WORLD multiple times */
    for (i = 0; i < NUM_IDUPS; i++) {
        MPI_Comm_idup(MPI_COMM_WORLD, &idupcomms[i], &reqs[i]);
    }

    /* Overlap pending idups with various comm generation functions */

    /* Comm_dup */
    MPI_Comm_dup(MPI_COMM_WORLD, &outcomm);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_split */
    MPI_Comm_split(MPI_COMM_WORLD, rank % 2, size - rank, &outcomm);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create, high half of MPI_COMM_WORLD */
    ranges[0][0] = size / 2;
    ranges[0][1] = size - 1;
    ranges[0][2] = 1;
    MPI_Group_range_incl(world_group, 1, ranges, &high_group);
    MPI_Comm_create(MPI_COMM_WORLD, high_group, &outcomm);
    MPI_Group_free(&high_group);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create_group, even ranks of MPI_COMM_WORLD */
    /* exclude the odd ranks */
    excl = malloc((size / 2) * sizeof(int));
    for (i = 0; i < size / 2; i++)
        excl[i] = (2 * i) + 1;

    MPI_Group_excl(world_group, size / 2, excl, &even_group);
    free(excl);

    if (rank % 2 == 0) {
        MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, &outcomm);
    } else {
        outcomm = MPI_COMM_NULL;
    }
    MPI_Group_free(&even_group);

    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Intercomm_create & Intercomm_merge */
    MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &local_comm);

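    /* Ranks 0 and size/2 are the local leaders of the low and high halves;
     * rleader is the remote leader's rank in MPI_COMM_WORLD and is
     * significant only at the two leaders. */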
    if (rank == 0) {
        rleader = size / 2;
    } else if (rank == size / 2) {
        rleader = 0;
    } else {
        rleader = -1;
    }
    isLeft = rank < size / 2;

    MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, rleader, 99, &inter_comm);
    MPI_Intercomm_merge(inter_comm, isLeft, &outcomm);
    MPI_Comm_free(&local_comm);

    errs += MTestTestComm(inter_comm);
    MTestFreeComm(&inter_comm);

    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    MPI_Waitall(NUM_IDUPS, reqs, MPI_STATUSES_IGNORE);
    for (i = 0; i < NUM_IDUPS; i++) {
        errs += MTestTestComm(idupcomms[i]);
        MPI_Comm_free(&idupcomms[i]);
    }

    MPI_Group_free(&world_group);

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Example 5: MTest helper that generates a variety of intracommunicators
/*
 * Get an intracommunicator with at least min_size members.  If "allowSmaller"
 * is true, allow the communicator to be smaller than MPI_COMM_WORLD and
 * for this routine to return MPI_COMM_NULL for some values.  Returns 0 if
 * no more communicators are available.
 */
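/* Note: intraCommIdx and intraCommName are file-scope state of the MTest
 * harness; this routine is reproduced here as a fragment. */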
int MTestGetIntracommGeneral(MPI_Comm * comm, int min_size, int allowSmaller)
{
    int size, rank, merr;
    int done = 0;
    int isBasic = 0;

    /* The while loop allows us to skip communicators that are too small.
     * MPI_COMM_NULL is always considered large enough */
    while (!done) {
        isBasic = 0;
        intraCommName = "";
        switch (intraCommIdx) {
        case 0:
            *comm = MPI_COMM_WORLD;
            isBasic = 1;
            intraCommName = "MPI_COMM_WORLD";
            break;
        case 1:
            /* dup of world */
            merr = MPI_Comm_dup(MPI_COMM_WORLD, comm);
            if (merr)
                MTestPrintError(merr);
            intraCommName = "Dup of MPI_COMM_WORLD";
            break;
        case 2:
            /* reverse ranks */
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_split(MPI_COMM_WORLD, 0, size - rank, comm);
            if (merr)
                MTestPrintError(merr);
            intraCommName = "Rank reverse of MPI_COMM_WORLD";
            break;
        case 3:
            /* subset of world, with reversed ranks */
            merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
            if (merr)
                MTestPrintError(merr);
            merr = MPI_Comm_split(MPI_COMM_WORLD, ((rank < size / 2) ? 1 : MPI_UNDEFINED),
                                  size - rank, comm);
            if (merr)
                MTestPrintError(merr);
            intraCommName = "Rank reverse of half of MPI_COMM_WORLD";
            break;
        case 4:
            *comm = MPI_COMM_SELF;
            isBasic = 1;
            intraCommName = "MPI_COMM_SELF";
            break;
        case 5:
            {
#if MTEST_HAVE_MIN_MPI_VERSION(3,0)
                /* Dup of the world using MPI_Intercomm_merge */
                int rleader, isLeft;
                MPI_Comm local_comm, inter_comm;
                MPI_Comm_size(MPI_COMM_WORLD, &size);
                MPI_Comm_rank(MPI_COMM_WORLD, &rank);
                if (size > 1) {
                    merr = MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &local_comm);
                    if (merr)
                        MTestPrintError(merr);
                    if (rank == 0) {
                        rleader = size / 2;
                    }
                    else if (rank == size / 2) {
                        rleader = 0;
                    }
                    else {
                        rleader = -1;
                    }
                    isLeft = rank < size / 2;
                    merr =
                        MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, rleader, 99,
                                             &inter_comm);
                    if (merr)
                        MTestPrintError(merr);
                    merr = MPI_Intercomm_merge(inter_comm, isLeft, comm);
                    if (merr)
                        MTestPrintError(merr);
                    MPI_Comm_free(&inter_comm);
                    MPI_Comm_free(&local_comm);
                    intraCommName = "Dup of WORLD created by MPI_Intercomm_merge";
                }
                else {
                    *comm = MPI_COMM_NULL;
                }
            }
            break;
        case 6:
            {
                /* Even of the world using MPI_Comm_create_group */
                int i;
                MPI_Group world_group, even_group;
                int *excl = NULL;

                MPI_Comm_size(MPI_COMM_WORLD, &size);
                MPI_Comm_rank(MPI_COMM_WORLD, &rank);
                if (allowSmaller && (size + 1) / 2 >= min_size) {
                    /* exclude the odd ranks */
                    excl = malloc((size / 2) * sizeof(int));
                    for (i = 0; i < size / 2; i++)
                        excl[i] = (2 * i) + 1;

                    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
                    MPI_Group_excl(world_group, size / 2, excl, &even_group);
                    MPI_Group_free(&world_group);
                    free(excl);

                    if (rank % 2 == 0) {
                        /* Even processes create a comm. for themselves */
                        MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, comm);
                        intraCommName = "Even of WORLD created by MPI_Comm_create_group";
                    }
                    else {
                        *comm = MPI_COMM_NULL;
                    }

                    MPI_Group_free(&even_group);
                }
                else {
                    *comm = MPI_COMM_NULL;
                }
#else
                *comm = MPI_COMM_NULL;
#endif
            }
            break;
        case 7:
            {
                /* High half of the world using MPI_Comm_create */
                int ranges[1][3];
                MPI_Group world_group, high_group;
                MPI_Comm_size(MPI_COMM_WORLD, &size);
                MPI_Comm_rank(MPI_COMM_WORLD, &rank);
                ranges[0][0] = size / 2;
                ranges[0][1] = size - 1;
                ranges[0][2] = 1;

                if (allowSmaller && (size + 1) / 2 >= min_size) {
                    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
                    merr = MPI_Group_range_incl(world_group, 1, ranges, &high_group);
                    if (merr)
                        MTestPrintError(merr);
                    merr = MPI_Comm_create(MPI_COMM_WORLD, high_group, comm);
                    if (merr)
                        MTestPrintError(merr);
                    MPI_Group_free(&world_group);
                    MPI_Group_free(&high_group);
                    intraCommName = "High half of WORLD created by MPI_Comm_create";
                }
                else {
                    *comm = MPI_COMM_NULL;
                }
            }
            break;
            /* These next cases are communicators that include some
             * but not all of the processes */
        case 8:
        case 9:
        case 10:
        case 11:
            {
                int newsize;
                merr = MPI_Comm_size(MPI_COMM_WORLD, &size);
                if (merr)
                    MTestPrintError(merr);
                newsize = size - (intraCommIdx - 7);
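                /* Cases 8-11 drop the last 1 to 4 ranks of MPI_COMM_WORLD */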

                if (allowSmaller && newsize >= min_size) {
                    merr = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
                    if (merr)
                        MTestPrintError(merr);
                    merr = MPI_Comm_split(MPI_COMM_WORLD, rank < newsize, rank, comm);
                    if (merr)
                        MTestPrintError(merr);
                    if (rank >= newsize) {
                        merr = MPI_Comm_free(comm);
                        if (merr)
                            MTestPrintError(merr);
                        *comm = MPI_COMM_NULL;
                    }
                    else {
                        intraCommName = "Split of WORLD";
                    }
                }
                else {
                    /* Act like default */
                    *comm = MPI_COMM_NULL;
                    intraCommIdx = -1;
                }
            }
            break;

            /* Other ideas: dup of self, cart comm, graph comm */
        default:
            *comm = MPI_COMM_NULL;
            intraCommIdx = -1;
            break;
        }

        if (*comm != MPI_COMM_NULL) {
            merr = MPI_Comm_size(*comm, &size);
            if (merr)
                MTestPrintError(merr);
            if (size >= min_size)
                done = 1;
        }
        else {
            intraCommName = "MPI_COMM_NULL";
            isBasic = 1;
            done = 1;
        }

        /* we are only done if all processes are done */
        MPI_Allreduce(MPI_IN_PLACE, &done, 1, MPI_INT, MPI_LAND, MPI_COMM_WORLD);

        /* Advance the comm index whether we are done or not, otherwise we could
         * spin forever trying to allocate a too-small communicator over and
         * over again. */
        intraCommIdx++;

        if (!done && !isBasic && *comm != MPI_COMM_NULL) {
            /* avoid leaking communicators */
            merr = MPI_Comm_free(comm);
            if (merr)
                MTestPrintError(merr);
        }
    }

    return intraCommIdx;
}
Example 6: MPI_Bcast after a simulated process failure
/*
 * This test attempts collective bcast communication after a process in
 * the communicator has failed.
 */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
    int rank, size, rc, errclass, toterrs, errs = 0;
    int deadprocs[] = { 1 };
    char buf[100000];
    MPI_Group world, newgroup;
    MPI_Comm newcomm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    MPI_Comm_group(MPI_COMM_WORLD, &world);
    MPI_Group_excl(world, 1, deadprocs, &newgroup);
    MPI_Comm_create_group(MPI_COMM_WORLD, newgroup, 0, &newcomm);

    if (size < 3) {
        fprintf(stderr, "Must run with at least 3 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

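    /* Simulate a process failure: rank 1 exits without calling MPI_Finalize.
     * With MPI_ERRORS_RETURN set, later collectives on MPI_COMM_WORLD are
     * expected to report an error (MPIX_ERR_PROC_FAILED on a fault-tolerant
     * MPICH). */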
    if (rank == 1) {
        exit(EXIT_FAILURE);
    }

    if (rank == 0) {
        strcpy(buf, "No Errors");
    }

    /* do a small bcast first */
    rc = MPI_Bcast(buf, 10, MPI_CHAR, 0, MPI_COMM_WORLD);

#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
    MPI_Error_class(rc, &errclass);
    if ((rc) && (errclass != MPIX_ERR_PROC_FAILED)) {
        fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n",
                errclass);
        errs++;
    }
#endif

    /* reset the non-root buffers */
    if (rank != 0)
        memset(buf, 0, sizeof(buf));

    /* do a larger bcast */
    rc = MPI_Bcast(buf, 100000, MPI_CHAR, 0, MPI_COMM_WORLD);

#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
    MPI_Error_class(rc, &errclass);
    if ((rc) && (errclass != MPIX_ERR_PROC_FAILED)) {
        fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n",
                errclass);
        errs++;
    }
#endif

    rc = MPI_Reduce(&errs, &toterrs, 1, MPI_INT, MPI_SUM, 0, newcomm);
    if (rc)
        fprintf(stderr, "Failed to get errors from other processes\n");

    if (rank == 0) {
        if (toterrs) {
            printf(" Found %d errors\n", toterrs);
        }
        else {
            printf(" No Errors\n");
        }
        fflush(stdout);
    }

    MPI_Group_free(&world);
    MPI_Group_free(&newgroup);
    MPI_Comm_free(&newcomm);
    MPI_Finalize();

    return 0;

}
Example 7: thread body for a multithreaded MPI_Comm_idup overlap test
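/* Thread body of a multithreaded comm_idup test: each thread runs the same
 * overlap sequence on its own input communicator comms[tid]. The comms[] and
 * errs[] arrays and the NUM_IDUPS macro are defined by the enclosing test
 * program, which is not shown here. */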
MTEST_THREAD_RETURN_TYPE test_idup(void *arg)
{
    int i;
    int size, rank;
    int ranges[1][3];
    int rleader, isLeft;
    int *excl = NULL;
    int tid = *(int *) arg;

    MPI_Group ingroup, high_group, even_group;
    MPI_Comm local_comm, inter_comm;
    MPI_Comm idupcomms[NUM_IDUPS];
    MPI_Request reqs[NUM_IDUPS];

    MPI_Comm outcomm;
    MPI_Comm incomm = comms[tid];

    MPI_Comm_size(incomm, &size);
    MPI_Comm_rank(incomm, &rank);
    MPI_Comm_group(incomm, &ingroup);

    /* Idup incomm multiple times */
    for (i = 0; i < NUM_IDUPS; i++) {
        MPI_Comm_idup(incomm, &idupcomms[i], &reqs[i]);
    }

    /* Overlap pending idups with various comm generation functions */
    /* Comm_dup */
    MPI_Comm_dup(incomm, &outcomm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_split */
    MPI_Comm_split(incomm, rank % 2, size - rank, &outcomm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create, high half of incomm */
    ranges[0][0] = size / 2;
    ranges[0][1] = size - 1;
    ranges[0][2] = 1;
    MPI_Group_range_incl(ingroup, 1, ranges, &high_group);
    MPI_Comm_create(incomm, high_group, &outcomm);
    MPI_Group_free(&high_group);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create_group, even ranks of incomm */
    /* exclude the odd ranks */
    excl = malloc((size / 2) * sizeof(int));
    for (i = 0; i < size / 2; i++)
        excl[i] = (2 * i) + 1;

    MPI_Group_excl(ingroup, size / 2, excl, &even_group);
    free(excl);

    if (rank % 2 == 0) {
        MPI_Comm_create_group(incomm, even_group, 0, &outcomm);
    }
    else {
        outcomm = MPI_COMM_NULL;
    }
    MPI_Group_free(&even_group);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Intercomm_create & Intercomm_merge */
    MPI_Comm_split(incomm, (rank < size / 2), rank, &local_comm);
    if (rank == 0) {
        rleader = size / 2;
    }
    else if (rank == size / 2) {
        rleader = 0;
    }
    else {
        rleader = -1;
    }
    isLeft = rank < size / 2;

    MPI_Intercomm_create(local_comm, 0, incomm, rleader, 99, &inter_comm);
    MPI_Intercomm_merge(inter_comm, isLeft, &outcomm);
    MPI_Comm_free(&local_comm);

    errs[tid] += MTestTestComm(inter_comm);
    MTestFreeComm(&inter_comm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    MPI_Waitall(NUM_IDUPS, reqs, MPI_STATUSES_IGNORE);
    for (i = 0; i < NUM_IDUPS; i++) {
        errs[tid] += MTestTestComm(idupcomms[i]);
        MPI_Comm_free(&idupcomms[i]);
    }
    MPI_Group_free(&ingroup);
    return NULL;
}
Example 8: MPI_Scatter after a simulated process failure
/*
 * This test attempts collective communication after a process in
 * the communicator has failed.
 */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(int argc, char **argv)
{
    int rank, size, i, rc, errclass, toterrs, errs = 0;
    char rbuf[100000];
    char *sendbuf;
    int deadprocs[1] = {1};
    MPI_Group world, newgroup;
    MPI_Comm newcomm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    if (size < 3) {
        fprintf( stderr, "Must run with at least 3 processes\n" );
        MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    if (rank == 1) {
        exit(EXIT_FAILURE);
    }

    /* try a small send first */
    sendbuf = (char *)malloc(10*size*sizeof(char));

    if (rank == 0) {
        for (i = 0; i < size; i++) {
            strcpy(sendbuf + i * 10, "No Errors");
        }
    }

    rc = MPI_Scatter(sendbuf, 10, MPI_CHAR, rbuf, 10, MPI_CHAR, 0, MPI_COMM_WORLD);

#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
    MPI_Error_class(rc, &errclass);
    if ((rc) && (errclass != MPIX_ERR_PROC_FAILED)) {
        fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n", errclass);
        errs++;
    }
#endif

    /* reset the buffers and try a larger scatter */
    free(sendbuf);
    memset(rbuf, 0, sizeof(rbuf));
    sendbuf = (char *)malloc(100000*size*sizeof(char));

    if (rank == 0) {
        for (i = 0; i < size; i++) {
            strcpy(sendbuf + i * 100000, "No Errors");
        }
    }

    rc = MPI_Scatter(sendbuf, 100000, MPI_CHAR, rbuf, 100000, MPI_CHAR, 0, MPI_COMM_WORLD);

#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
    MPI_Error_class(rc, &errclass);
    if ((rc) && (errclass != MPIX_ERR_PROC_FAILED)) {
        fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n", errclass);
        errs++;
    }
#endif

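    /* Collect the error counts on a communicator that excludes the failed
     * rank 1; a reduction over MPI_COMM_WORLD would involve the dead process. */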
    MPI_Comm_group(MPI_COMM_WORLD, &world);
    MPI_Group_excl(world, 1, deadprocs, &newgroup);
    MPI_Comm_create_group(MPI_COMM_WORLD, newgroup, 0, &newcomm);

    rc = MPI_Reduce(&errs, &toterrs, 1, MPI_INT, MPI_SUM, 0, newcomm);
    if(rc)
        fprintf(stderr, "Failed to get errors from other processes\n");

    if (rank == 0) {
        if (toterrs) {
            printf( " Found %d errors\n", toterrs );
        }
        else {
            printf( " No Errors\n" );
        }
        fflush(stdout);
    }

    free(sendbuf);

    MPI_Group_free(&world);
    MPI_Group_free(&newgroup);
    MPI_Comm_free(&newcomm);

    MPI_Finalize();

    return 0;
}