Example #1
int main( int argc, char *argv[] )
{
    int buf[2];
    MPI_Win        win;
    MPI_Errhandler newerr;
    int            i;

    MTest_Init( &argc, &argv );

    /* Run this test multiple times to expose storage leaks (we found a leak
       of error handlers with this test) */
    for (i = 0; i < 1000; i++) {
	calls = 0;
	
	MPI_Win_create( buf, 2*sizeof(int), sizeof(int), 
			MPI_INFO_NULL, MPI_COMM_WORLD, &win );
	mywin = win;
	
	MPI_Win_create_errhandler( eh, &newerr );
	
	MPI_Win_set_errhandler( win, newerr );
	MPI_Win_call_errhandler( win, MPI_ERR_OTHER );
	MPI_Errhandler_free( &newerr );
	if (calls != 1) {
	    errs++;
	    printf( "Error handler not called\n" );
	}
	MPI_Win_free( &win );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
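The excerpt above leans on file-scope definitions (calls, errs, mywin, and the handler eh) that the MPICH test defines outside the quoted region. A minimal sketch of what they might look like, assuming the standard MPI_Win_errhandler_function signature:

/* Hypothetical file-scope state assumed by the excerpt above; the real
   MPICH test defines its own versions. */
static int calls = 0;   /* number of times the handler has fired */
static int errs = 0;    /* accumulated test failures */
static MPI_Win mywin;   /* the window the handler expects to see */

/* Window error handler: check the window argument and count the call. */
static void eh(MPI_Win *win, int *err, ...)
{
    if (*win != mywin) {
        errs++;
        printf("eh called on the wrong window\n");
    }
    calls++;
}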
Example #2
File: rmazero.c  Project: NexMirror/MPICH
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int rank, size;
    int *buf, bufsize;
    int *result;
    int *rmabuf, rsize, rcount;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Request req;
    MPI_Datatype derived_dtp;

    MTest_Init(&argc, &argv);

    bufsize = 256 * sizeof(int);
    buf = (int *) malloc(bufsize);
    if (!buf) {
        fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    result = (int *) malloc(bufsize);
    if (!result) {
        fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    rcount = 16;
    rsize = rcount * sizeof(int);
    rmabuf = (int *) malloc(rsize);
    if (!rmabuf) {
        fprintf(stderr, "Unable to allocated %d bytes\n", rsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Type_contiguous(2, MPI_INT, &derived_dtp);
    MPI_Type_commit(&derived_dtp);

    /* The following loop is used to run through a series of communicators
     * that are subsets of MPI_COMM_WORLD, of size 1 or greater. */
    while (MTestGetIntracommGeneral(&comm, 1, 1)) {
        int count = 0;

        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);

        MPI_Win_create(buf, bufsize, 2 * sizeof(int), MPI_INFO_NULL, comm, &win);
        /* To improve reporting of problems about operations, we
         * change the error handler to errors return */
        MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

        /** TEST OPERATIONS USING ACTIVE TARGET (FENCE) SYNCHRONIZATION **/
        MPI_Win_fence(0, win);

        TEST_FENCE_OP("Put", MPI_Put(rmabuf, count, MPI_INT, TARGET, 0, count, MPI_INT, win);
);

        TEST_FENCE_OP("Get", MPI_Get(rmabuf, count, MPI_INT, TARGET, 0, count, MPI_INT, win);
);
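The TEST_FENCE_OP macro and TARGET are defined earlier in rmazero.c and are not part of the excerpt; the odd-looking ");" lines close macro invocations whose second argument already ends in ";". A plausible stand-in, assuming TARGET names the rank addressed by the zero-count operations:

/* Hypothetical versions of the definitions the excerpt assumes; the
   MPICH originals may differ in detail. */
#define TARGET 0

#define TEST_FENCE_OP(op_name_, fcn_call_)              \
    do {                                                \
        err = fcn_call_    /* call site supplies ';' */ \
        if (err) {                                      \
            errs++;                                     \
            if (errs < 10)                              \
                MTestPrintError(err);                   \
        }                                               \
        err = MPI_Win_fence(0, win);                    \
        if (err) {                                      \
            errs++;                                     \
            if (errs < 10)                              \
                MTestPrintError(err);                   \
        }                                               \
    } while (0)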
Example #3
int main(int argc, char *argv[])
{
    int          rank, nproc;
    int          errors = 0, all_errors = 0;
    int          buf, my_buf;
    MPI_Win      win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    MPI_Win_create(&buf, sizeof(int), sizeof(int),
                    MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

    MPI_Win_fence(0, win);

    MPI_Win_lock(MPI_LOCK_SHARED, 0, MPI_MODE_NOCHECK, win);
    MPI_Get(&my_buf, 1, MPI_INT, 0, 0, 1, MPI_INT, win);
    MPI_Win_unlock(0, win);

    /* This should fail because the window is no longer in a fence epoch */
    CHECK_ERR(MPI_Get(&my_buf, 1, MPI_INT, 0, 0, 1, MPI_INT, win));

    MPI_Win_fence(0, win);
    MPI_Win_free(&win);

    MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0 && all_errors == 0) printf(" No Errors\n");
    MPI_Finalize();

    return 0;
}
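CHECK_ERR is defined above main in the original test. Every use in these examples asserts that a call fails under MPI_ERRORS_RETURN; a minimal sketch under that assumption:

/* Hypothetical CHECK_ERR: the wrapped call is expected to return an
   error, so MPI_SUCCESS counts as a test failure. */
#define CHECK_ERR(fn_call_)                                               \
    do {                                                                  \
        int cerr_ = (fn_call_);                                           \
        if (cerr_ == MPI_SUCCESS) {                                       \
            errors++;                                                     \
            printf("%d: expected an error at line %d\n", rank, __LINE__); \
        }                                                                 \
    } while (0)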
Example #4
int main(int argc, char *argv[])
{
    int rank;
    int errors = 0, all_errors = 0;
    int buf;
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Win_create(&buf, sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

    /* This should fail because the window is not locked. */
    CHECK_ERR(MPI_Win_unlock(0, win));

    MPI_Win_free(&win);

    MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0 && all_errors == 0)
        printf(" No Errors\n");
    MPI_Finalize();

    return 0;
}
Example #5
void mpi_win_set_errhandler_f(MPI_Fint *win, MPI_Fint *errhandler,
                              MPI_Fint *ierr)
{
    MPI_Win c_win = MPI_Win_f2c(*win);
    MPI_Errhandler c_err = MPI_Errhandler_f2c(*errhandler);

    *ierr = OMPI_INT_2_FINT(MPI_Win_set_errhandler(c_win, c_err));
}
Example #6
/*
 * Class:     mpi_Win
 * Method:    setErrhandler
 * Signature: (JJ)V
 */
JNIEXPORT void JNICALL Java_mpi_Win_setErrhandler(
        JNIEnv *env, jobject jthis, jlong win, jlong errhandler)
{
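    /* Note: the jlong errhandler argument is ignored; MPI_ERRORS_RETURN is
       installed unconditionally, presumably so that later errors on the
       window come back as return codes the Java bindings can convert into
       exceptions. */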
    int rc = MPI_Win_set_errhandler(
             (MPI_Win)win, (MPI_Errhandler)MPI_ERRORS_RETURN);

    ompi_java_exceptionCheck(env, rc);
}
Example #7
File: rmanull.c  Project: NexMirror/MPICH
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int rank, size;
    int *buf, bufsize;
    int *result;
    int *rmabuf, rsize, rcount;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Request req;

    MTest_Init(&argc, &argv);

    bufsize = 256 * sizeof(int);
    buf = (int *) malloc(bufsize);
    if (!buf) {
        fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    result = (int *) malloc(bufsize);
    if (!result) {
        fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    rcount = 16;
    rsize = rcount * sizeof(int);
    rmabuf = (int *) malloc(rsize);
    if (!rmabuf) {
        fprintf(stderr, "Unable to allocated %d bytes\n", rsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* The following illustrates the use of the routines to
     * run through a selection of communicators and datatypes.
     * Use subsets of these for tests that do not involve combinations
     * of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral(&comm, 1, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);

        MPI_Win_create(buf, bufsize, sizeof(int), MPI_INFO_NULL, comm, &win);
        /* To improve reporting of problems about operations, we
         * change the error handler to errors return */
        MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

        /** TEST OPERATIONS USING ACTIVE TARGET (FENCE) SYNCHRONIZATION **/
        MPI_Win_fence(0, win);

        TEST_FENCE_OP("Put",
                      MPI_Put(rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0, rcount, MPI_INT, win);
);

        TEST_FENCE_OP("Get",
                      MPI_Get(rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0, rcount, MPI_INT, win);
);
Example #8
void ompi_win_set_errhandler_f(MPI_Fint *win, MPI_Fint *errhandler,
                               MPI_Fint *ierr)
{
    int c_ierr;
    MPI_Win c_win = MPI_Win_f2c(*win);
    MPI_Errhandler c_err = MPI_Errhandler_f2c(*errhandler);

    c_ierr = MPI_Win_set_errhandler(c_win, c_err);
    if (NULL != ierr) *ierr = OMPI_INT_2_FINT(c_ierr);
}
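Unlike the otherwise identical wrapper in Example #5, this Open MPI variant guards the store through ierr with a NULL check, so a caller that passes no status argument cannot trigger a write through a null pointer.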
Example #9
int main(int argc, char *argv[])
{
    int          rank, nproc, i;
    int          errors = 0, all_errors = 0;
    int          buf, *my_buf;
    MPI_Win      win;
    MPI_Group    world_group;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    MPI_Win_create(&buf, sizeof(int), sizeof(int),
                    MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Win_post(world_group, 0, win);
    MPI_Win_start(world_group, 0, win);

    my_buf = malloc(nproc*sizeof(int));

    for (i = 0; i < nproc; i++) {
        MPI_Get(&my_buf[i], 1, MPI_INT, i, 0, 1, MPI_INT, win);
    }

    /* This should fail, because the window is in an active target access epoch. */
    CHECK_ERR(MPI_Win_start(world_group, 0, win));

    MPI_Win_complete(win);

    /* This should fail, because the window is not in an active target access epoch. */
    CHECK_ERR(MPI_Win_complete(win));

    MPI_Win_wait(win);
    MPI_Win_free(&win);

    free(my_buf);
    MPI_Group_free(&world_group);

    MPI_Reduce(&errors, &all_errors, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);

    if (rank == 0 && all_errors == 0) printf(" No Errors\n");
    MPI_Finalize();

    return 0;
}
Example #10
int main(int argc, char *argv[])
{
    int err;
    int buf[2];
    MPI_Win win;
    MPI_Comm comm;
    MPI_Errhandler newerr, olderr;

    MTEST_VG_MEM_INIT(buf, 2 * sizeof(int));

    MTest_Init(&argc, &argv);
    comm = MPI_COMM_WORLD;
    MPI_Win_create_errhandler(weh, &newerr);

    MPI_Win_create(buf, 2 * sizeof(int), sizeof(int), MPI_INFO_NULL, comm, &win);

    mywin = win;
    MPI_Win_get_errhandler(win, &olderr);
    if (olderr != MPI_ERRORS_ARE_FATAL) {
        errs++;
        printf("Expected errors are fatal\n");
    }

    MPI_Win_set_errhandler(win, newerr);

    expected_err_class = MPI_ERR_RANK;
    err = MPI_Put(buf, 1, MPI_INT, -5, 0, 1, MPI_INT, win);
    if (calls != 1) {
        errs++;
        printf("newerr not called\n");
        calls = 1;
    }
    expected_err_class = MPI_ERR_OTHER;
    MPI_Win_call_errhandler(win, MPI_ERR_OTHER);
    if (calls != 2) {
        errs++;
        printf("newerr not called (2)\n");
    }

    MPI_Win_free(&win);
    MPI_Errhandler_free(&newerr);

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
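As in Example #1, this test relies on file-scope helpers (weh, calls, errs, mywin, expected_err_class) defined outside the excerpt. A sketch of a handler consistent with the checks above, assuming it classifies the error code with MPI_Error_class:

/* Hypothetical version of weh; the real MPICH test defines its own. */
static int expected_err_class = MPI_SUCCESS;

static void weh(MPI_Win *win, int *err, ...)
{
    int errclass;
    MPI_Error_class(*err, &errclass);
    if (*win != mywin) {
        errs++;
        printf("weh called on the wrong window\n");
    }
    if (errclass != expected_err_class) {
        errs++;
        printf("Unexpected error class %d (expected %d)\n",
               errclass, expected_err_class);
    }
    calls++;
}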
Example #11
void Set_Errhand(struct comm_info* c_info)
{
#ifdef SET_ERRH
    if (c_info->communicator != MPI_COMM_NULL)
        MPI_Errhandler_set(c_info->communicator, c_info->ERR);

#ifdef EXT
    if (c_info->WIN != MPI_WIN_NULL)
        MPI_Win_set_errhandler(c_info->WIN, c_info->ERRW);
#endif

#ifdef MPIIO
    if (c_info->fh != MPI_FILE_NULL)
        MPI_File_set_errhandler(c_info->fh, c_info->ERRF);
#endif

#endif
}
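MPI_Errhandler_set is the deprecated MPI-1 spelling (removed in MPI-3); the call that mirrors the window and file variants above is:

/* MPI-2 replacement for the deprecated MPI_Errhandler_set */
MPI_Comm_set_errhandler(c_info->communicator, c_info->ERR);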
Example #12
int main(int argc, char *argv[])
{
    int rank, nproc, i;
    int errors = 0;
    int buf = 0, *my_buf;
    MPI_Win win;
    MPI_Group world_group;

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nproc);

    MPI_Win_create(&buf, sizeof(int), sizeof(int), MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

    MPI_Comm_group(MPI_COMM_WORLD, &world_group);
    MPI_Win_post(world_group, 0, win);
    MPI_Win_start(world_group, 0, win);

    my_buf = malloc(nproc * sizeof(int));

    for (i = 0; i < nproc; i++) {
        MPI_Get(&my_buf[i], 1, MPI_INT, i, 0, 1, MPI_INT, win);
    }

    /* This should fail, because the window is in an active target epoch. */
    CHECK_ERR(MPI_Win_free(&win));

    MPI_Win_complete(win);
    MPI_Win_wait(win);

    MPI_Win_free(&win);

    free(my_buf);
    MPI_Group_free(&world_group);

    MTest_Finalize(errors);

    return MTestReturnValue(errors);
}
Example #13
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Aint extent;
    MTestDatatype sendtype, recvtype;

    MTest_Init(&argc, &argv);

    /* The following illustrates the use of the routines to
     * run through a selection of communicators and datatypes.
     * Use subsets of these for tests that do not involve combinations
     * of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        source = 0;
        dest = size - 1;

        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
                /* Make sure that everyone has a recv buffer */
                recvtype.InitBuf(&recvtype);

                MPI_Type_extent(recvtype.datatype, &extent);
                MPI_Win_create(recvtype.buf, recvtype.count * extent,
                               (int) extent, MPI_INFO_NULL, comm, &win);
                MPI_Win_fence(0, win);
                if (rank == source) {
                    sendtype.InitBuf(&sendtype);

                    /* To improve reporting of problems about operations, we
                     * change the error handler to errors return */
                    MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

                    /* MPI_REPLACE on accumulate is almost the same
                     * as MPI_Put; the only difference is in the
                     * handling of overlapping accumulate operations,
                     * which are not tested here */
                    err = MPI_Accumulate(sendtype.buf, sendtype.count,
                                         sendtype.datatype, dest, 0,
                                         recvtype.count, recvtype.datatype, MPI_REPLACE, win);
                    if (err) {
                        errs++;
                        if (errs < 10) {
                            printf("Accumulate types: send %s, recv %s\n",
                                   MTestGetDatatypeName(&sendtype),
                                   MTestGetDatatypeName(&recvtype));
                            MTestPrintError(err);
                        }
                    }
                    err = MPI_Win_fence(0, win);
                    if (err) {
                        errs++;
                        if (errs < 10) {
                            MTestPrintError(err);
                        }
                    }
                }
                else if (rank == dest) {
                    MPI_Win_fence(0, win);
                    /* This should have the same effect, in terms of
                     * transferring data, as a send/recv pair */
                    err = MTestCheckRecv(0, &recvtype);
                    if (err) {
                        errs += err;
                    }
                }
                else {
                    MPI_Win_fence(0, win);
                }
                MPI_Win_free(&win);
                MTestFreeDatatype(&sendtype);
                MTestFreeDatatype(&recvtype);
            }
        }
        MTestFreeComm(&comm);
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
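MTEST_DATATYPE_FOR_EACH_COUNT comes from the MTest harness. Judging from the explicit loops in Examples #14 and #15, a stand-in could look like the following (the real macro may walk a different count schedule):

/* Hypothetical stand-in for the MTest count loop used above */
#define MTEST_DATATYPE_FOR_EACH_COUNT(count_) \
    for ((count_) = 1; (count_) < 65000; (count_) *= 2)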
Example #14
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MPI_Win       win;
    MPI_Aint      extent;
    MPI_Group     wingroup, neighbors;
    MTestDatatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	source = 0;
	dest   = size - 1;
	
	for (count = 1; count < 65000; count = count * 2) {
	    while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
		/* Make sure that everyone has a recv buffer */
		recvtype.InitBuf( &recvtype );

		MPI_Type_extent( recvtype.datatype, &extent );
		MPI_Win_create( recvtype.buf, recvtype.count * extent, 
				(int)extent, MPI_INFO_NULL, comm, &win );
		MPI_Win_get_group( win, &wingroup );
		if (rank == source) {
		    /* To improve reporting of problems about operations, we
		       change the error handler to errors return */
		    MPI_Win_set_errhandler( win, MPI_ERRORS_RETURN );
		    sendtype.InitBuf( &sendtype );
		    
		    /* Neighbor is dest only */
		    MPI_Group_incl( wingroup, 1, &dest, &neighbors );
		    err = MPI_Win_start( neighbors, 0, win );
		    if (err) {
			errs++;
			if (errs < 10) {
			    MTestPrintError( err );
			}
		    }
		    MPI_Group_free( &neighbors );
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				    sendtype.datatype, dest, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) {
			errs++;
			MTestPrintError( err );
		    }
		    err = MPI_Win_complete( win );
		    if (err) {
			errs++;
			if (errs < 10) {
			    MTestPrintError( err );
			}
		    }
		}
		else if (rank == dest) {
		    MPI_Group_incl( wingroup, 1, &source, &neighbors );
		    MPI_Win_post( neighbors, 0, win );
		    MPI_Group_free( &neighbors );
		    MPI_Win_wait( win );
		    /* This should have the same effect, in terms of
		       transferring data, as a send/recv pair */
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) {
			errs += err;
		    }
		}
		else {
		    /* Nothing; the other processes need not call any 
		       MPI routines */
		    ;
		}
		MPI_Win_free( &win );
		MTestFreeDatatype( &sendtype );
		MTestFreeDatatype( &recvtype );
		MPI_Group_free( &wingroup );
	    }
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #15
int main( int argc, char **argv )
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MPI_Win       win;
    MPI_Aint      extent;
    MTestDatatype sendtype, recvtype;
    int           onlyInt = 0;

    MTest_Init( &argc, &argv );
    /* Check for a simple choice of communicator and datatypes */
    if (getenv( "MTEST_SIMPLE" )) onlyInt = 1;

    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	source = 0;
	dest   = size - 1;
	
	for (count = 1; count < 65000; count = count * 2) {
	    while (MTestGetDatatypes( &sendtype, &recvtype, count )) {

		MTestPrintfMsg( 1, 
		       "Putting count = %d of sendtype %s receive type %s\n", 
				count, MTestGetDatatypeName( &sendtype ),
				MTestGetDatatypeName( &recvtype ) );

		/* Make sure that everyone has a recv buffer */
		recvtype.InitBuf( &recvtype );

		MPI_Type_extent( recvtype.datatype, &extent );
		MPI_Win_create( recvtype.buf, recvtype.count * extent, 
				extent, MPI_INFO_NULL, comm, &win );
		/* To improve reporting of problems about operations, we
		   change the error handler to errors return */
		MPI_Win_set_errhandler( win, MPI_ERRORS_RETURN );

		/* At this point, we have all of the elements that we 
		   need to begin the multiple fence and put tests */
		/* Fence 1 */
		err = MPI_Win_fence( MPI_MODE_NOPRECEDE, win ); 
		if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		/* Source puts */
		if (rank == source) {
		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, dest, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		}

		/* Fence 2 */
		err = MPI_Win_fence( 0, win );
		if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		/* dest checks data, then Dest puts */
		if (rank == dest) {
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) { if (errs++ < MAX_PERR) { 
			    PrintRecvedError( "fence 2", &sendtype, &recvtype );
			}
		    }
		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, source, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		}

		/* Fence 3 */
		err = MPI_Win_fence( 0, win );
		if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		/* src checks data, then src and dest put */
		if (rank == source) {
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) { if (errs++ < MAX_PERR) { 
			    PrintRecvedError( "fence 3", &sendtype, &recvtype );
			}
		    }
		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, dest, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		}
		if (rank == dest) {
		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, source, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		}

		/* Fence 4 */
		err = MPI_Win_fence( MPI_MODE_NOSUCCEED, win );
		if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		/* src and dest check data */
		if (rank == source) {
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) { if (errs++ < MAX_PERR) { 
			    PrintRecvedError( "src fence4", &sendtype, &recvtype );
			}
		    }
		}
		if (rank == dest) {
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) { if (errs++ < MAX_PERR) { 
			    PrintRecvedError( "dest fence4", &sendtype, &recvtype );
			}
		    }
		}

		MPI_Win_free( &win );
		MTestFreeDatatype( &sendtype );
		MTestFreeDatatype( &recvtype );

		/* Only do one datatype in the simple case */
		if (onlyInt) break;
	    }
	    /* Only do one count in the simple case */
	    if (onlyInt) break;
	}
        MTestFreeComm(&comm);
	/* Only do one communicator in the simple case */
	if (onlyInt) break;
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
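MAX_PERR and PrintRecvedError are defined earlier in the same test file. A sketch consistent with how they are used above:

/* Hypothetical definitions; the MPICH originals may differ. */
#define MAX_PERR 10

static void PrintRecvedError(const char *msg,
                             MTestDatatype *sendtypePtr,
                             MTestDatatype *recvtypePtr)
{
    printf("At %s, data in target buffer did not match for destination "
           "datatype %s (put with source datatype %s)\n",
           msg, MTestGetDatatypeName(recvtypePtr),
           MTestGetDatatypeName(sendtypePtr));
}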
Example #16
int main( int argc, char *argv[] )
{
    int err;
    int buf[2];
    MPI_Win       win;
    MPI_Comm      comm;
    MPI_Errhandler newerr1, newerr2, olderr;


    MTest_Init( &argc, &argv );
    comm = MPI_COMM_WORLD;
    MPI_Win_create_errhandler( weh1, &newerr1 );
    MPI_Win_create_errhandler( weh2, &newerr2 );

    MPI_Win_create( buf, 2*sizeof(int), sizeof(int), 
		    MPI_INFO_NULL, comm, &win );
    
    mywin = win;
    MPI_Win_get_errhandler( win, &olderr );
    if (olderr != MPI_ERRORS_ARE_FATAL) {
	errs++;
	printf( "Expected errors are fatal\n" );
    }

    MPI_Win_set_errhandler( win, newerr1 );
    /* We should be able to free the error handler now, even though the
       window is still using it (the handler is reference counted) */
    MPI_Errhandler_free( &newerr1 );
    
    expected_err_class = MPI_ERR_RANK;
    err = MPI_Put( buf, 1, MPI_INT, -5, 0, 1, MPI_INT, win );
    if (w1Called != 1) {
	errs ++;
	printf( "newerr1 not called\n" );
	w1Called = 1;
    }
    expected_err_class = MPI_ERR_OTHER;
    MPI_Win_call_errhandler( win, MPI_ERR_OTHER );
    if (w1Called != 2) {
	errs ++;
	printf( "newerr1 not called (2)\n" );
    }

    if (w1Called != 2 || w2Called != 0) {
	errs++;
	printf( "Error handler weh1 not called the expected number of times\n" );
    }

    /* Try another error handler.  This should allow the MPI implementation to 
       free the first error handler */
    MPI_Win_set_errhandler( win, newerr2 );
    MPI_Errhandler_free( &newerr2 );
    
    expected_err_class = MPI_ERR_RANK;
    err = MPI_Put( buf, 1, MPI_INT, -5, 0, 1, MPI_INT, win );
    if (w2Called != 1) {
	errs ++;
	printf( "newerr2 not called\n" );
	w2Called = 1;
    }
    expected_err_class = MPI_ERR_OTHER;
    MPI_Win_call_errhandler( win, MPI_ERR_OTHER );
    if (w2Called != 2) {
	errs ++;
	printf( "newerr2 not called (2)\n" );
    }
    if (w1Called != 2 || w2Called != 2) {
	errs++;
	printf( "Error handler weh1 not called the expected number of times\n" );
    }

    MPI_Win_free( &win );
	
    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
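weh1 and weh2 are assumed to be file-scope handlers analogous to weh in Example #10, each bumping its own counter (w1Called, w2Called) so the test can tell which handler the window actually invoked after each MPI_Win_set_errhandler/MPI_Errhandler_free pair.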
Example #17
File: putfidx.c  Project: NexMirror/MPICH
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int i, rank, size, source, dest;
    int blksize, totsize;
    int *recvBuf = 0, *srcBuf = 0;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Aint extent;
    MPI_Datatype originType;
    int counts[2];
    int displs[2];

    MTest_Init(&argc, &argv);

    /* Select the communicator and datatypes */
    comm = MPI_COMM_WORLD;

    /* Create the datatype */
    /* One MPI Implementation fails this test with sufficiently large
     * values of blksize - it appears to convert this type to an
     * incorrect contiguous move */
    blksize = 2048;
    counts[0] = blksize;
    counts[1] = blksize;
    displs[0] = 0;
    displs[1] = blksize + 1;
    MPI_Type_indexed(2, counts, displs, MPI_INT, &originType);
    MPI_Type_commit(&originType);

    totsize = 2 * blksize;

    /* Determine the sender and receiver */
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    source = 0;
    dest = size - 1;

    recvBuf = (int *) malloc(totsize * sizeof(int));
    srcBuf = (int *) malloc((totsize + 1) * sizeof(int));

    if (!recvBuf || !srcBuf) {
        fprintf(stderr, "Could not allocate buffers\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Initialize the send and recv buffers */
    for (i = 0; i < totsize; i++) {
        recvBuf[i] = -1;
    }
    for (i = 0; i < blksize; i++) {
        srcBuf[i] = i;
        srcBuf[blksize + 1 + i] = blksize + i;
    }
    srcBuf[blksize] = -1;

    MPI_Type_extent(MPI_INT, &extent);
    MPI_Win_create(recvBuf, totsize * extent, extent, MPI_INFO_NULL, comm, &win);
    MPI_Win_fence(0, win);
    if (rank == source) {
        /* To improve reporting of problems about operations, we
         * change the error handler to errors return */
        MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

        err = MPI_Put(srcBuf, 1, originType, dest, 0, totsize, MPI_INT, win);
        errs += CheckMPIErr(err);
        err = MPI_Win_fence(0, win);
        errs += CheckMPIErr(err);
    }
    else if (rank == dest) {
        MPI_Win_fence(0, win);
        for (i = 0; i < totsize; i++) {
            if (recvBuf[i] != i) {
                errs++;
                if (errs < 10) {
                    printf("recvBuf[%d] = %d should = %d\n", i, recvBuf[i], i);
                }
            }
        }
    }
    else {
        MPI_Win_fence(0, win);
    }

    MPI_Type_free(&originType);
    MPI_Win_free(&win);
    free(recvBuf);
    free(srcBuf);

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
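CheckMPIErr is a small helper defined elsewhere in putfidx.c; given that its return value is added to errs, it plausibly looks like:

/* Hypothetical CheckMPIErr: report and count a failing return code. */
static int CheckMPIErr(int err)
{
    if (err != MPI_SUCCESS) {
        MTestPrintError(err);
        return 1;
    }
    return 0;
}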
Example #18
static inline int test(MPI_Comm comm, int rank, int source, int dest,
                       MTestDatatype * sendtype, MTestDatatype * recvtype)
{
    int errs = 0, err;
    MPI_Aint extent, lb;
    MPI_Win win;

    MTestPrintfMsg(1,
                   "Putting count = %ld of sendtype %s - count = %ld receive type %s\n",
                   sendtype->count, MTestGetDatatypeName(sendtype), recvtype->count,
                   MTestGetDatatypeName(recvtype));

    /* Make sure that everyone has a recv buffer */
    recvtype->InitBuf(recvtype);
    MPI_Type_extent(recvtype->datatype, &extent);
    MPI_Type_lb(recvtype->datatype, &lb);
    MPI_Win_create(recvtype->buf, recvtype->count * extent + lb, extent, MPI_INFO_NULL, comm, &win);
    MPI_Win_fence(0, win);
    if (rank == source) {
        /* To improve reporting of problems about operations, we
         * change the error handler to errors return */
        MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

        sendtype->InitBuf(sendtype);

        err = MPI_Put(sendtype->buf, sendtype->count,
                      sendtype->datatype, dest, 0, recvtype->count, recvtype->datatype, win);
        if (err) {
            errs++;
            if (errs < 10) {
                MTestPrintError(err);
            }
        }
        err = MPI_Win_fence(0, win);
        if (err) {
            errs++;
            if (errs < 10) {
                MTestPrintError(err);
            }
        }
    }
    else if (rank == dest) {
        MPI_Win_fence(0, win);
        /* This should have the same effect, in terms of
         * transferring data, as a send/recv pair */
        err = MTestCheckRecv(0, recvtype);
        if (err) {
            if (errs < 10) {
                printf
                    ("Data in target buffer did not match for destination datatype %s (put with source datatype %s)\n",
                     MTestGetDatatypeName(recvtype), MTestGetDatatypeName(sendtype));
                /* Redo the test, with the errors printed */
                recvtype->printErrors = 1;
                (void) MTestCheckRecv(0, recvtype);
            }
            errs += err;
        }
    }
    else {
        MPI_Win_fence(0, win);
    }
    MPI_Win_free(&win);

    return errs;
}
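The helper above is meant to be driven by the same communicator/datatype loops the other examples use inline; a minimal, hypothetical driver:

/* Hypothetical driver for test(); names mirror the other examples. */
while (MTestGetIntracommGeneral(&comm, 2, 1)) {
    if (comm == MPI_COMM_NULL)
        continue;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    source = 0;
    dest = size - 1;

    for (count = 1; count < 65000; count *= 2) {
        while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
            errs += test(comm, rank, source, dest, &sendtype, &recvtype);
            MTestFreeDatatype(&sendtype);
            MTestFreeDatatype(&recvtype);
        }
    }
    MTestFreeComm(&comm);
}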
Example #19
int main(int argc, char **argv)
{
    int nprocs, mpi_err, *array;
    int getval, disp, errs=0;
    MPI_Win win;
    MPI_Datatype type;
    
    MTest_Init(&argc,&argv); 

    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    if (nprocs != 1) {
        printf("Run this program with 1 process\n");
        MPI_Abort(MPI_COMM_WORLD,1);
    }

    /* To improve reporting of problems about operations, we
       change the error handler to errors return */
    MPI_Comm_set_errhandler( MPI_COMM_WORLD, MPI_ERRORS_RETURN );

    /* create an indexed datatype that points to the second integer 
       in an array (the first integer is skipped). */
    disp  =  1;
    mpi_err = MPI_Type_create_indexed_block(1, 1, &disp, MPI_INT, &type);
    if (mpi_err != MPI_SUCCESS) goto err_return;
    mpi_err = MPI_Type_commit(&type);
    if (mpi_err != MPI_SUCCESS) goto err_return;

    /* allocate memory for a window of 2 integers */
    mpi_err = MPI_Alloc_mem(2*sizeof(int), MPI_INFO_NULL, &array);
    if (mpi_err != MPI_SUCCESS) goto err_return;

    /* create window object */
    mpi_err = MPI_Win_create(array, 2*sizeof(int), sizeof(int), 
                             MPI_INFO_NULL, MPI_COMM_WORLD, &win);
    if (mpi_err != MPI_SUCCESS) goto err_return;
 
    /* initialize array */
    array[0] = 100;
    array[1] = 200;

    getval = 0;
    
    /* To improve reporting of problems about operations, we
       change the error handler to errors return */
    MPI_Win_set_errhandler( win, MPI_ERRORS_RETURN );

    mpi_err = MPI_Win_lock(MPI_LOCK_EXCLUSIVE, 0, 0, win);
    if (mpi_err != MPI_SUCCESS) goto err_return;

    /* get the current value of element array[1] */
    mpi_err = MPI_Get(&getval, 1, MPI_INT, 0, 0, 1, type, win);
    if (mpi_err != MPI_SUCCESS) goto err_return;

    mpi_err = MPI_Win_unlock(0, win);
    if (mpi_err != MPI_SUCCESS) goto err_return;

    /* getval should contain the value of array[1] */
    if (getval != array[1]) {
        errs++;
        printf("getval=%d, should be %d\n", getval, array[1]);
    }

    MPI_Free_mem(array);
    MPI_Win_free(&win);
    MPI_Type_free(&type);

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;

 err_return:
    printf("MPI function error returned an error\n");
    MTestPrintError( mpi_err );
    errs++;
    MTest_Finalize(errs);
    MPI_Finalize();
    return 1;
}
Example #20
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MPI_Win       win;
    MPI_Aint      extent;
    MTestDatatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	source = 0;
	dest   = size - 1;
	
	for (count = 1; count < 65000; count = count * 2) {
	    while (MTestGetDatatypes( &sendtype, &recvtype, count )) {

		MTestPrintfMsg( 1, 
		       "Putting count = %d of sendtype %s receive type %s\n", 
				count, MTestGetDatatypeName( &sendtype ),
				MTestGetDatatypeName( &recvtype ) );

		/* Make sure that everyone has a recv buffer */
		recvtype.InitBuf( &recvtype );

		MPI_Type_extent( recvtype.datatype, &extent );
		MPI_Win_create( recvtype.buf, recvtype.count * extent, 
				extent, MPI_INFO_NULL, comm, &win );
		MPI_Win_fence( 0, win );
		if (rank == source) {
		    /* To improve reporting of problems about operations, we
		       change the error handler to errors return */
		    MPI_Win_set_errhandler( win, MPI_ERRORS_RETURN );

		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, dest, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) {
			errs++;
			if (errs < 10) {
			    MTestPrintError( err );
			}
		    }
		    err = MPI_Win_fence( 0, win );
		    if (err) {
			errs++;
			if (errs < 10) {
			    MTestPrintError( err );
			}
		    }
		}
		else if (rank == dest) {
		    MPI_Win_fence( 0, win );
		    /* This should have the same effect, in terms of
		       transferring data, as a send/recv pair */
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) {
			if (errs < 10) {
			    printf( "Data in target buffer did not match for destination datatype %s (put with source datatype %s)\n", 
				    MTestGetDatatypeName( &recvtype ),
				    MTestGetDatatypeName( &sendtype ) );
			    /* Redo the test, with the errors printed */
			    recvtype.printErrors = 1;
			    (void)MTestCheckRecv( 0, &recvtype );
			}
			errs += err;
		    }
		}
		else {
		    MPI_Win_fence( 0, win );
		}
		MPI_Win_free( &win );
		MTestFreeDatatype( &sendtype );
		MTestFreeDatatype( &recvtype );
	    }
	}
        MTestFreeComm(&comm);
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}