int main(int argc, char *argv[]) {
   int thread_support;
   char root_path[MPI_PMEM_MAX_ROOT_PATH];
   char *window_name = "test_window";
   void *win_data;
   MPI_Aint win_size = 1024;
   MPI_Win_pmem win;
   int error_code;
   int result = 0;

   MPI_Init_thread_pmem(&argc, &argv, MPI_THREAD_MULTIPLE, &thread_support);
   sprintf(root_path, "%s/0", argv[1]);
   MPI_Win_pmem_set_root_path(root_path);

   // Create window and 3 checkpoints.
   allocate_window(&win, &win_data, window_name, win_size);
   create_checkpoint(win, false);
   create_checkpoint(win, false);
   create_checkpoint(win, false);
   MPI_Win_free_pmem(&win);

   // Try to delete window version.
   MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
   error_code = MPI_Win_pmem_delete_version(window_name, 3);
   MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL);

   if (error_code != MPI_ERR_PMEM_CKPT_VER) {
      mpi_log_error("Error code is %d, expected %d.", error_code, MPI_ERR_PMEM_CKPT_VER);
      result = 1;
   }

   MPI_Finalize_pmem();

   return result;
}
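
The helpers allocate_window, create_checkpoint and mpi_log_error used above are not shown in this listing. As a rough sketch only, allocate_window could wrap MPI_Win_allocate_pmem with the same info keys that appear in the next example (pmem_is_pmem, pmem_name, pmem_mode); the real helper may well differ, and create_checkpoint is omitted because its API is not visible here.

/* Hypothetical helper, not part of the original listing: mirrors the
 * allocation pattern used in the next example. The info keys are copied
 * from that example; everything else in this sketch is an assumption. */
static void allocate_window(MPI_Win_pmem *win, void **win_data,
                            char *window_name, MPI_Aint win_size) {
   MPI_Info info;

   MPI_Info_create(&info);
   MPI_Info_set(info, "pmem_is_pmem", "true");
   MPI_Info_set(info, "pmem_name", window_name);
   MPI_Info_set(info, "pmem_mode", "checkpoint");
   MPI_Win_allocate_pmem(win_size, 1, info, MPI_COMM_WORLD, win_data, win);
   MPI_Info_free(&info);
}
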
int main(int argc, char *argv[]) {
   int thread_support;
   char root_path[MPI_PMEM_MAX_ROOT_PATH];
   MPI_Info info;
   MPI_Win_pmem win;
   char *window_name = "test_window";
   char *win_data;
   MPI_Aint win_size = 1024;
   int error_code;
   int result = 0;

   MPI_Init_thread_pmem(&argc, &argv, MPI_THREAD_MULTIPLE, &thread_support);
   sprintf(root_path, "%s/0", argv[1]);
   MPI_Win_pmem_set_root_path(root_path);

   // Allocate window.
   MPI_Info_create(&info);
   MPI_Info_set(info, "pmem_is_pmem", "true");
   MPI_Info_set(info, "pmem_name", window_name);
   MPI_Info_set(info, "pmem_mode", "checkpoint");
   MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
   error_code = MPI_Win_allocate_pmem(win_size, 1, info, MPI_COMM_WORLD, &win_data, &win);
   MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL);
   MPI_Info_free(&info);

   if (error_code != MPI_ERR_PMEM_NAME) {
      mpi_log_error("Error code is %d, expected %d.", error_code, MPI_ERR_PMEM_NAME);
      result = 1;
   }

   MPI_Finalize_pmem();

   return result;
}
Example No. 3
int main (int argc, char **argv)
{
    MPI_Errhandler errh;
    int wrank;
    MPI_Init (&argc, &argv);
    MPI_Comm_rank( MPI_COMM_WORLD, &wrank );
    MPI_Comm_create_errhandler((MPI_Comm_errhandler_function*)errf, &errh);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, errh);
    MPI_Comm_set_errhandler(MPI_COMM_SELF, errh);
    MPI_Errhandler_free(&errh);
    MPI_Finalize();
    /* Test harness requirement is that only one process write No Errors */
    if (wrank == 0) 
      printf(" No Errors\n");
    return 0;
}
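
The callback errf registered above is not included in this snippet (hence the cast to MPI_Comm_errhandler_function*). Any handler passed to MPI_Comm_create_errhandler must match that signature; the body below is purely illustrative and is not the original errf.

#include <stdio.h>
#include <mpi.h>

/* Illustrative sketch only: the real errf is not part of this listing.
 * The signature (failing communicator, error code, implementation-defined
 * varargs tail) is the one MPI_Comm_create_errhandler requires. */
static void errf(MPI_Comm *comm, int *errcode, ...)
{
    char msg[MPI_MAX_ERROR_STRING];
    int len = 0;

    (void) comm;  /* unused in this sketch */
    MPI_Error_string(*errcode, msg, &len);
    fprintf(stderr, "MPI reported an error: %s\n", msg);
}
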
Example No. 4
int main(int argc, char **argv)
{
    int err, errs = 0;

    MTest_Init(&argc, &argv);
    parse_args(argc, argv);

    /* To improve reporting of problems about operations, we
     * change the error handler to errors return */
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    /* perform some tests */
    err = blockindexed_contig_test();
    if (err && verbose)
        fprintf(stderr, "%d errors in blockindexed test.\n", err);
    errs += err;

    err = blockindexed_vector_test();
    if (err && verbose)
        fprintf(stderr, "%d errors in blockindexed vector test.\n", err);
    errs += err;

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
int numProcsFails(MPI_Comm mcw){
        int rank, ret, numFailures = 0, flag;
        MPI_Group fGroup;
        MPI_Errhandler newEh;
        MPI_Comm dupComm;

        // Error handler
        MPI_Comm_create_errhandler(mpiErrorHandler, &newEh);

        MPI_Comm_rank(mcw, &rank);

        // Set error handler for communicator
        MPI_Comm_set_errhandler(mcw, newEh);

        // Target function
        if(MPI_SUCCESS != (ret = MPI_Comm_dup(mcw, &dupComm))) {
        //if(MPI_SUCCESS != (ret = MPI_Barrier(mcw))) { // MPI_Comm_dup or MPI_Barrier
           OMPI_Comm_failure_ack(mcw);
           OMPI_Comm_failure_get_acked(mcw, &fGroup);
           // Get the number of failures
           MPI_Group_size(fGroup, &numFailures);
        }// end of "MPI_Comm_dup failure"

        OMPI_Comm_agree(mcw, &flag);
        // Memory release
        if(numFailures > 0)
           MPI_Group_free(&fGroup);
        MPI_Errhandler_free(&newEh);

        return numFailures;
}//numProcsFails()
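
A hypothetical caller of numProcsFails (not part of the listing), assuming the function above is in scope and that the same ULFM/Open MPI extensions and the mpiErrorHandler callback are available:

#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[])
{
    int failures;

    MPI_Init(&argc, &argv);
    failures = numProcsFails(MPI_COMM_WORLD);
    if (failures > 0)
        fprintf(stderr, "detected %d failed process(es)\n", failures);
    MPI_Finalize();
    return 0;
}
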
Example No. 6
/**
 * @brief Wrapper around MPI_Init
 *
 * We check the error code to detect MPI errors.
 *
 * This method also initializes the rank and size global variables.
 *
 * @param argc Pointer to the number of command line arguments
 * @param argv Pointer to the command line arguments
 */
inline void MyMPI_Init(int* argc, char*** argv) {
    int status = MPI_Init(argc, argv);
    if(status != MPI_SUCCESS) {
        std::cerr << "Failed to initialize MPI environment!" << std::endl;
        my_exit();
    }

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    MyMPI_Comm_rank(&MPIGlobal::rank);
    MyMPI_Comm_size(&MPIGlobal::size);
    MyMPI_Comm_local_vars(&MPIGlobal::local_rank, &MPIGlobal::local_size,
                          &MPIGlobal::noderank, &MPIGlobal::nodesize,
                          &MPIGlobal::nodecomm);

    // allocate the communication buffers
    // we initialize them with size 0; applications that need them should
    // increase the size later
    // by initializing them here, we make sure the buffers exist, so that
    // we can safely deallocate them again in MyMPI_Finalize
    MPIGlobal::sendsize = 0;
    MPIGlobal::sendbuffer = new char[MPIGlobal::sendsize];
    MPIGlobal::recvsize = 0;
    MPIGlobal::recvbuffer = new char[MPIGlobal::recvsize];
}
Example No. 7
int main(int argc, char *argv[])
{
    int color, key, ret;
    MPI_Errhandler new_eh;
    MPI_Comm comm_subset;

    /*
     * Startup MPI
     */
    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_mcw_rank);
    MPI_Comm_size(MPI_COMM_WORLD, &mpi_mcw_size);

    if( mpi_mcw_size < 2 ) {
        printf("Error: Must use at least 2 processes for this test\n");
        MPI_Finalize();
        return -1;
    }

    /*
     * Create a new error handler for MPI_COMM_WORLD
     * This overrides the default MPI_ERRORS_ARE_FATAL so that ranks in this
     * communicator will not automatically abort if a failure occurs.
     */
    MPI_Comm_create_errhandler(mpi_error_handler, &new_eh);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, new_eh);
    signal(SIGUSR2,  signal_handler);

    iterative_solver(MPI_COMM_WORLD);

    MPI_Finalize();

    return 0;
}
Example No. 8
int main(int argc, char *argv[])
{
    int err, errs = 0;

    /* Initialize MPI */
    MPI_Init(&argc, &argv);
    parse_args(argc, argv);

    /* To improve reporting of problems about operations, we
       change the error handler to errors return */
    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );

    err = hindexed_zerotype_test();
    if (verbose && err) fprintf(stderr, "error in hindexed_zerotype_test\n");
    errs += err;

    err = hindexed_sparsetype_test();
    if (verbose && err) fprintf(stderr, "error in hindexed_sparsetype_test\n");
    errs += err;

    /* print message and exit */
    if (errs) {
	fprintf(stderr, "Found %d errors\n", errs);
    }
    else {
	printf(" No Errors\n");
    }
    MPI_Finalize();
    return 0;
}
Example No. 9
int wc_mpi_init(int *argc, char ***argv)
{
	int rc = 0;
	int flag = 0;
	if (MPI_Finalized(&flag) == MPI_SUCCESS) {
		if (flag != 0) {
			WC_ERROR("MPI has already been finalized.\n");
			return -1;
		}
	}
	flag = 0;
	if (MPI_Initialized(&flag) == MPI_SUCCESS) {
		if (flag == 0) {
			rc = MPI_Init(argc, argv);
			WC_HANDLE_MPI_ERROR(MPI_Init, rc);
			if (rc == MPI_SUCCESS)
				rc |= MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
			return rc;
		} else {
			rc = 0;
		}
	} else {
		WC_HANDLE_MPI_ERROR(MPI_Initialized, rc);
	}
	return rc;
}
Example No. 10
int main(int argc, char **argv)
{
    int err, errs = 0;

    MTest_Init(&argc, &argv);
    parse_args(argc, argv);

    /* To improve reporting of problems about operations, we
     * change the error handler to errors return */
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    /* perform some tests */
    err = darray_2d_c_test1();
    if (err && verbose)
        fprintf(stderr, "%d errors in 2d darray c test 1.\n", err);
    errs += err;

    err = darray_4d_c_test1();
    if (err && verbose)
        fprintf(stderr, "%d errors in 4d darray c test 1.\n", err);
    errs += err;

    /* print message and exit */
    /* Allow the use of more than one process - some MPI implementations
     * (including IBM's) check that the number of processes given to
     * Type_create_darray is no larger than MPI_COMM_WORLD */

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Example No. 11
int main( int argc, char *argv[] )
{
    int errs = 0;
    int rc;
    int ranks[2];
    MPI_Group ng;
    char      str[MPI_MAX_ERROR_STRING+1];
    int       slen;

    MTest_Init( &argc, &argv );
    /* Set errors return */
    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );

    /* Create some valid input data except for the group handle */
    ranks[0] = 0;
    rc = MPI_Group_incl( MPI_COMM_WORLD, 1, ranks, &ng );
    if (rc == MPI_SUCCESS) {
	errs ++;
	printf( "Did not detect invalid handle (comm) in group_incl\n" );
    }
    else {
	if (verbose) {
	    MPI_Error_string( rc, str, &slen );
	    printf( "Found expected error; message is: %s\n", str );
	}
    }

    MTest_Finalize( errs );
    MPI_Finalize( );
    return 0;
}
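
For contrast with the deliberately invalid call above (MPI_COMM_WORLD passed where a group handle is expected), a conforming sequence first extracts the group from the communicator with MPI_Comm_group. A minimal standalone sketch, not taken from the original test suite:

#include <mpi.h>

int main(int argc, char *argv[])
{
    int ranks[1] = { 0 };
    MPI_Group world_group, new_group;

    MPI_Init(&argc, &argv);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);   /* valid group handle */
    MPI_Group_incl(world_group, 1, ranks, &new_group);
    MPI_Group_free(&new_group);
    MPI_Group_free(&world_group);
    MPI_Finalize();
    return 0;
}
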
Example No. 12
int main(int argc, char **argv)
{
    int err, errs = 0;

    MPI_Init(&argc, &argv); /* MPI-1.2 doesn't allow for MPI_Init(0,0) */
    parse_args(argc, argv);

    /* To improve reporting of problems about operations, we
       change the error handler to errors return */
    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );

    /* perform some tests */
    err = contig_test();
    if (err && verbose) fprintf(stderr, "%d errors in contig test.\n",
				err);
    errs += err;

    /* print message and exit */
    if (errs) {
	fprintf(stderr, "Found %d errors\n", errs);
    }
    else {
	printf(" No Errors\n");
    }
    MPI_Finalize();
    return 0;
}
Example No. 13
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int dims[2];
    int periods[2];
    int size, rank;
    MPI_Comm comm;

    MTest_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    dims[0] = size;
    dims[1] = size;
    periods[0] = 0;
    periods[1] = 0;

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    err = MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 0, &comm);
    if (err == MPI_SUCCESS) {
        errs++;
        printf("Cart_create returned success when dims > size\n");
    } else if (comm != MPI_COMM_NULL) {
        errs++;
        printf("Expected a null comm from cart create\n");
    }

    MTest_Finalize(errs);


    return MTestReturnValue(errs);
}
Example No. 14
int main(int argc, char **argv)
{
    int err, errs = 0;

    MPI_Init(&argc, &argv); /* MPI-1.2 doesn't allow for MPI_Init(0,0) */
    parse_args(argc, argv);

    /* To improve reporting of problems about operations, we
       change the error handler to errors return */
    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );

    /* perform some tests */
    err = builtin_float_test();
    errs += err;
    if (err) {
	fprintf(stderr, "Found %d errors in builtin float test.\n", err);
    }

    err = vector_of_vectors_test();
    errs += err;
    if (err) {
	fprintf(stderr, "Found %d errors in vector of vectors test.\n", err);
    }

    err = optimizable_vector_of_basics_test();
    errs += err;
    if (err) {
	fprintf(stderr, "Found %d errors in vector of basics test.\n", err);
    }

    err = indexed_of_basics_test();
    errs += err;
    if (err) {
	fprintf(stderr, "Found %d errors in indexed of basics test.\n", err);
    }

    err = indexed_of_vectors_test();
    errs += err;
    if (err) {
	fprintf(stderr, "Found %d errors in indexed of vectors test.\n", err);
    }

#ifdef HAVE_MPI_TYPE_CREATE_STRUCT
    err = struct_of_basics_test();
    errs += err;
#endif

    /* print message and exit */
    if (errs) {
	fprintf(stderr, "Found %d errors\n", errs);
    }
    else {
	printf(" No Errors\n");
    }
    MPI_Finalize();
    return 0;
}
Example No. 15
int main(int argc, char *argv[])
{
    int wrank, wsize, rank, size, color;
    int tmp, errs = 0;
    MPI_Comm newcomm;

    MPI_Init(&argc, &argv);

    MPI_Comm_size(MPI_COMM_WORLD, &wsize);
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);

    /* Color is 0 or 1; 1 will be the processes that "fault" */
    /* process 0 and wsize/2+1...wsize-1 are in non-faulting group */
    color = (wrank > 0) && (wrank <= wsize / 2);
    MPI_Comm_split(MPI_COMM_WORLD, color, wrank, &newcomm);

    MPI_Comm_size(newcomm, &size);
    MPI_Comm_rank(newcomm, &rank);

    /* Set errors return on COMM_WORLD and the new comm */
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    MPI_Comm_set_errhandler(newcomm, MPI_ERRORS_RETURN);

    MPI_Barrier(MPI_COMM_WORLD);
    if (color) {
        /* Simulate a fault on some processes */
        exit(1);
    }

    /* Can we still use newcomm? */
    MPI_Allreduce(&rank, &tmp, 1, MPI_INT, MPI_SUM, newcomm);
    if (tmp != (size * (size - 1)) / 2) {
        printf("Allreduce gave %d but expected %d\n", tmp, (size * (size - 1)) / 2);
        errs++;
    }

    MPI_Comm_free(&newcomm);
    MPI_Finalize();

    printf(" No Errors\n");

    return 0;
}
Example No. 16
int main(int argc, char **argv)
{
    int rank, nproc, mpi_errno;
    int i, ncomm, *ranks;
    int errs = 1;
    MPI_Comm *comm_hdls;
    MPI_Group world_group;

    MTest_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    comm_hdls = malloc(sizeof(MPI_Comm) * MAX_NCOMM);
    ranks = malloc(sizeof(int) * nproc);

    ncomm = 0;
    for (i = 0; i < MAX_NCOMM; i++) {
        int incl = i % nproc;
        MPI_Group comm_group;

        /* Comms include ranks: 0; 1; 2; ...; 0; 1; ... */
        MPI_Group_incl(world_group, 1, &incl, &comm_group);

        /* Note: the comms we create all contain one rank from MPI_COMM_WORLD */
        mpi_errno = MPI_Comm_create(MPI_COMM_WORLD, comm_group, &comm_hdls[i]);

        if (mpi_errno == MPI_SUCCESS) {
            if (verbose)
                printf("%d: Created comm %d\n", rank, i);
            ncomm++;
        } else {
            if (verbose)
                printf("%d: Error creating comm %d\n", rank, i);
            MPI_Group_free(&comm_group);
            errs = 0;
            break;
        }

        MPI_Group_free(&comm_group);
    }

    for (i = 0; i < ncomm; i++)
        MPI_Comm_free(&comm_hdls[i]);

    free(comm_hdls);
    free(ranks);
    MPI_Group_free(&world_group);

    MTest_Finalize(errs);

    return MTestReturnValue(errs);
}
Example No. 17
int main(int argc, char **argv) {
    int       rank, nproc, mpi_errno;
    int       i, ncomm, *ranks;
    int       errors = 1;
    MPI_Comm *comm_hdls;
    MPI_Group world_group;

    MPI_Init(&argc, &argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
    comm_hdls = malloc(sizeof(MPI_Comm) * MAX_NCOMM);
    ranks     = malloc(sizeof(int) * nproc);

    for (i = 0; i < nproc; i++)
        ranks[i] = i;

    ncomm = 0;
    for (i = 0; i < MAX_NCOMM; i++) {
        MPI_Group comm_group;

        /* Comms include ranks: 0; 0,1; 0,1,2; ...; 0; 0,1; 0,1,2; ... */
        MPI_Group_incl(world_group, (i+1) % (nproc+1), /* Adding 1 yields counts of 1..nproc */
                       ranks, &comm_group);

        /* Note: the comms we create are all varying subsets of MPI_COMM_WORLD */
        mpi_errno = MPI_Comm_create(MPI_COMM_WORLD, comm_group, &comm_hdls[i]);

        if (mpi_errno == MPI_SUCCESS) {
            ncomm++;
        } else {
            if (verbose) printf("%d: Error creating comm %d\n", rank, i);
            MPI_Group_free(&comm_group);
            errors = 0;
            break;
        }

        MPI_Group_free(&comm_group);
    }

    for (i = 0; i < ncomm; i++)
        MPI_Comm_free(&comm_hdls[i]);

    free(comm_hdls);
    MPI_Group_free(&world_group);

    MTest_Finalize(errors);
    MPI_Finalize();

    return 0;
}
Example No. 18
int main(int argc, char **argv)
{
    int rank, size, rc, ec, errs = 0;
    int flag = 1;
    MPI_Comm dup, shrunk;

    MPI_Init(&argc, &argv);
    MPI_Comm_dup(MPI_COMM_WORLD, &dup);
    MPI_Comm_rank(dup, &rank);
    MPI_Comm_size(dup, &size);
    MPI_Comm_set_errhandler(dup, MPI_ERRORS_RETURN);

    if (size < 4) {
        fprintf(stderr, "Must run with at least 4 processes\n");
        MPI_Abort(dup, 1);
    }

    if (2 == rank) exit(EXIT_FAILURE);

    if (MPI_SUCCESS == (rc = MPIX_Comm_agree(dup, &flag))) {
        MPI_Error_class(rc, &ec);
        fprintf(stderr, "[%d] Expected MPIX_ERR_PROC_FAILED after agree. Received: %d\n", rank, ec);
        errs++;
        MPI_Abort(dup, 1);
    }

    if (MPI_SUCCESS != (rc = MPIX_Comm_shrink(dup, &shrunk))) {
        MPI_Error_class(rc, &ec);
        fprintf(stderr, "[%d] Expected MPI_SUCCESS after shrink. Received: %d\n", rank, ec);
        errs++;
        MPI_Abort(dup, 1);
    }

    if (MPI_SUCCESS != (rc = MPIX_Comm_agree(shrunk, &flag))) {
        MPI_Error_class(rc, &ec);
        fprintf(stderr, "[%d] Expected MPI_SUCCESS after agree. Received: %d\n", rank, ec);
        errs++;
        MPI_Abort(dup, 1);
    }

    MPI_Comm_free(&shrunk);
    MPI_Comm_free(&dup);

    if (0 == rank) {
        if (errs)
            fprintf(stdout, " Found %d errors\n", errs);
        else
            fprintf(stdout, " No errors\n");
    }

    MPI_Finalize();
    return 0;
}
Example No. 19
int main(int argc, char** argv)
{
    Dune::MPIHelper::instance(argc, argv);

#if defined(HAVE_MPI) && HAVE_MPI
    MPI_Errhandler errhandler;
    MPI_Comm_create_errhandler(MPI_err_handler, &errhandler);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, errhandler);
#endif // HAVE_MPI

    return boost::unit_test::unit_test_main(&init_unit_test_func,
                                            argc, argv);
}
Example No. 20
int main(int argc, char **argv)
{
    int err, errs = 0;

    MTest_Init(&argc, &argv);
    parse_args(argc, argv);

    /* To improve reporting of problems about operations, we
     * change the error handler to errors return */
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    /* perform some tests */
    err = subarray_1d_c_test1();
    if (err && verbose)
        fprintf(stderr, "%d errors in 1d subarray c test 1.\n", err);
    errs += err;

    err = subarray_1d_fortran_test1();
    if (err && verbose)
        fprintf(stderr, "%d errors in 1d subarray fortran test 1.\n", err);
    errs += err;

    err = subarray_2d_c_test1();
    if (err && verbose)
        fprintf(stderr, "%d errors in 2d subarray c test 1.\n", err);
    errs += err;

    err = subarray_2d_fortran_test1();
    if (err && verbose)
        fprintf(stderr, "%d errors in 2d subarray fortran test 1.\n", err);
    errs += err;

    err = subarray_2d_c_test2();
    if (err && verbose)
        fprintf(stderr, "%d errors in 2d subarray c test 2.\n", err);
    errs += err;

    err = subarray_4d_c_test1();
    if (err && verbose)
        fprintf(stderr, "%d errors in 4d subarray c test 1.\n", err);
    errs += err;

    err = subarray_4d_fortran_test1();
    if (err && verbose)
        fprintf(stderr, "%d errors in 4d subarray fortran test 1.\n", err);
    errs += err;

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Example No. 21
int main(int argc, char *argv[]) {
   int thread_support;
   MPI_Info info;
   char parsed[MPI_PMEM_MAX_NAME];
   int error_code;
   int result = 0;

   MPI_Init_thread_pmem(&argc, &argv, MPI_THREAD_MULTIPLE, &thread_support);

   MPI_Info_create(&info);
   MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
   error_code = parse_mpi_info_name(MPI_COMM_WORLD, info, parsed);
   MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_ARE_FATAL);
   MPI_Info_free(&info);

   if (error_code != MPI_ERR_PMEM_NAME) {
      mpi_log_error("Error code is %d, expected %d.", error_code, MPI_ERR_PMEM_NAME);
      result = 1;
   }

   MPI_Finalize_pmem();

   return result;
}
Example No. 22
void mpi_comm_set_errhandler_f(MPI_Fint *comm, MPI_Fint *errhandler,
			       MPI_Fint *ierr)
{
    MPI_Comm c_comm;
    MPI_Errhandler c_errhandler;

    c_comm = MPI_Comm_f2c(*comm);
    c_errhandler = MPI_Errhandler_f2c(*errhandler);

    *ierr = OMPI_INT_2_FINT(MPI_Comm_set_errhandler(c_comm, c_errhandler));
    if ( MPI_SUCCESS == OMPI_FINT_2_INT(*ierr) &&
	 OMPI_ERRHANDLER_TYPE_PREDEFINED != c_errhandler->eh_mpi_object_type ) {
	c_errhandler->eh_fortran_function = true ;
    }
}
Example No. 23
/*
 * This test attempts collective communication after a process in
 * the communicator has failed. Since all processes contribute to
 * the result of the operation, all processes will receive an error.
 */
int main(int argc, char **argv)
{
    int rank, size, err, errclass;
    int sendbuf[1] = { 42 };
    int recvbuf[1];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    if (size < 3) {
        fprintf(stderr, "Must run with at least 3 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if (rank == 1) {
        exit(EXIT_FAILURE);
    }

    err = MPI_Reduce(sendbuf, recvbuf, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0) {
#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
        MPI_Error_class(err, &errclass);
        if (errclass == MPIX_ERR_PROC_FAILED) {
            printf(" No Errors\n");
            fflush(stdout);
        }
        else {
            fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n",
                    errclass);
        }
#else
        if (err) {
            printf(" No Errors\n");
            fflush(stdout);
        }
        else {
            fprintf(stderr, "Program reported MPI_SUCCESS, but an error code was expected.\n");
        }
#endif
    }

    MPI_Finalize();

    return 0;
}
Example No. 24
/**
 * Initialize group linked list. Prepopulate with world group.
 */
void comex_group_init() 
{
    /* create the head of the group linked list */
    assert(group_list == NULL);
    group_list = malloc(sizeof(comex_igroup_t));
    group_list->id = COMEX_GROUP_WORLD;
    group_list->next = NULL;
    group_list->win_list = NULL;
#ifdef USE_MPI_ERRORS_RETURN
    MPI_Comm_set_errhandler(MPI_COMM_WORLD,MPI_ERRORS_RETURN);
#endif

    /* save MPI world group and communicator in COMEX_GROUP_WORLD */
    group_list->comm = l_state.world_comm;
    MPI_Comm_group(group_list->comm, &(group_list->group));
}
Example No. 25
/*
 * This test ensures that shrink works correctly
 */
int main(int argc, char **argv)
{
    int rank, size, newsize, rc, errclass, errs = 0;
    MPI_Comm newcomm;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    if (size < 4) {
        fprintf(stderr, "Must run with at least 4 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if (2 == rank)
        exit(EXIT_FAILURE);

    rc = MPIX_Comm_shrink(MPI_COMM_WORLD, &newcomm);
    if (rc) {
        MPI_Error_class(rc, &errclass);
        fprintf(stderr, "Expected MPI_SUCCESS from MPIX_Comm_shrink. Received: %d\n", errclass);
        errs++;
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_size(newcomm, &newsize);
    if (newsize != size - 1)
        errs++;

    rc = MPI_Barrier(newcomm);
    if (rc) {
        MPI_Error_class(rc, &errclass);
        fprintf(stderr, "Expected MPI_SUCCESS from MPI_BARRIER. Received: %d\n", errclass);
        errs++;
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_free(&newcomm);

    if (0 == rank)
        fprintf(stdout, " No Errors\n");

    MPI_Finalize();

    return 0;
}
Example No. 26
int main(int argc, char *argv[])
{
    int p, r;
    int errs = 0;
    int err;
    int i, j, nLoop = 1;
    MPI_Datatype newtype;

    MTest_Init(0, 0);

    if (argc > 1) {
        nLoop = atoi(argv[1]);
    }
    /* Set the handler to errors return, since according to the
     * standard, it is invalid to provide p and/or r that are unsupported */

    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    for (i = 0; i < nLoop; i++) {

        /* These should be valid types similar to MPI_REAL and MPI_REAL8 */
        for (j = 0; j < 6; j++) {
            p = cases[j][0];
            r = cases[j][1];
            err = MPI_Type_create_f90_real(p, r, &newtype);
            errs += checkType("REAL", p, r, MPI_COMBINER_F90_REAL, err, newtype);
        }

        /* These should be valid types similar to MPI_COMPLEX and MPI_COMPLEX8 */
        for (j = 0; j < 6; j++) {
            p = cases[j][0];
            r = cases[j][1];
            err = MPI_Type_create_f90_complex(p, r, &newtype);
            errs += checkType("COMPLEX", p, r, MPI_COMBINER_F90_COMPLEX, err, newtype);
        }

        /* This should be a valid type similar to MPI_INTEGER */
        p = 3;
        r = 10;
        err = MPI_Type_create_f90_integer(p, &newtype);
        errs += checkType("INTEGER", p, r, MPI_COMBINER_F90_INTEGER, err, newtype);
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Example No. 27
int main(int argc, char *argv[]) {
    int rank, size, i;
    int sum = 0, val = 1;
    int errs = 0;
    MPI_Errhandler errhandler;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (size < 4) {
        fprintf(stderr, "Must run with at least 4 processes.\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_dup(MPI_COMM_WORLD, &comm_all);

    MPI_Comm_create_errhandler(&error_handler, &errhandler);
    MPI_Comm_set_errhandler(comm_all, errhandler);

    for (i = 0; i < 10; ++i) {
        MPI_Comm_size(comm_all, &size);
        sum = 0;
        if (i == 5 && rank == 1) {
            exit(1);
        } else if (i != 5) {
            MPI_Allreduce(&val, &sum, 1, MPI_INT, MPI_SUM, comm_all);
            if (sum != size && rank == 0) {
                errs++;
                fprintf(stderr, "Incorrect answer: %d != %d\n", sum, size);
            }
        }
    }

    if (0 == rank && errs) {
        fprintf(stdout, " Found %d errors\n", errs);
    } else if (0 == rank) {
        fprintf(stdout, " No errors\n");
    }

    MPI_Comm_free(&comm_all);
    MPI_Errhandler_free(&errhandler);

    MPI_Finalize();

    return 0;
}
Example No. 28
int main(int argc, char *argv[])
{
    int err, errs = 0;

    MTest_Init(&argc, &argv);
    parse_args(argc, argv);

    /* To improve reporting of problems about operations, we
     * change the error handler to errors return */
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    err = short_int_pack_test();
    errs += err;

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Example No. 29
/*
 * This test attempts MPI_Recv with the source being a dead process. It should fail
 * and return an error. If we are testing sufficiently new MPICH, we look for the
 * MPIX_ERR_PROC_FAILED error code. These should be converted to look for the
 * standardized error code once it is finalized.
 */
int main(int argc, char **argv)
{
    int rank, size, err, errclass, toterrs = 0;
    char buf[10];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size < 2) {
        fprintf(stderr, "Must run with at least 2 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    if (rank == 1) {
        exit(EXIT_FAILURE);
    }

    if (rank == 0) {
        MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
        err = MPI_Recv(buf, 1, MPI_CHAR, 1, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
#if defined (MPICH) && (MPICH_NUMVERSION >= 30100102)
        MPI_Error_class(err, &errclass);
        if (errclass == MPIX_ERR_PROC_FAILED) {
            printf(" No Errors\n");
            fflush(stdout);
        } else {
            fprintf(stderr, "Wrong error code (%d) returned. Expected MPIX_ERR_PROC_FAILED\n",
                    errclass);
            toterrs++;
        }
#else
        if (err) {
            printf(" No Errors\n");
            fflush(stdout);
        } else {
            fprintf(stderr, "Program reported MPI_SUCCESS, but an error code was expected.\n");
            toterrs++;
        }
#endif
    }

    MPI_Finalize();

    return MTestReturnValue(toterrs);
}
Example No. 30
int main( int argc, char *argv[] )
{
    int errs = 0;
    char port_name[MPI_MAX_PORT_NAME], serv_name[256];

    MTest_Init( &argc, &argv );

    strcpy( port_name, "otherhost:122" );
    strcpy( serv_name, "MyTest" );

    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );

    MPI_Unpublish_name( serv_name, MPI_INFO_NULL, port_name );

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}