Example #1
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int rank, size;
    int *buf, bufsize;
    int *result;
    int *rmabuf, rsize, rcount;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Request req;
    MPI_Datatype derived_dtp;

    MTest_Init(&argc, &argv);

    bufsize = 256 * sizeof(int);
    buf = (int *) malloc(bufsize);
    if (!buf) {
        fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    result = (int *) malloc(bufsize);
    if (!result) {
        fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    rcount = 16;
    rsize = rcount * sizeof(int);
    rmabuf = (int *) malloc(rsize);
    if (!rmabuf) {
        fprintf(stderr, "Unable to allocated %d bytes\n", rsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Type_contiguous(2, MPI_INT, &derived_dtp);
    MPI_Type_commit(&derived_dtp);

    /* The following loop is used to run through a series of communicators
     * that are subsets of MPI_COMM_WORLD, of size 1 or greater. */
    while (MTestGetIntracommGeneral(&comm, 1, 1)) {
        int count = 0;

        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);

        MPI_Win_create(buf, bufsize, 2 * sizeof(int), MPI_INFO_NULL, comm, &win);
        /* To improve reporting of problems about operations, we
         * change the error handler to errors return */
        MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

        /** TEST OPERATIONS USING ACTIVE TARGET (FENCE) SYNCHRONIZATION **/
        MPI_Win_fence(0, win);

        TEST_FENCE_OP("Put", MPI_Put(rmabuf, count, MPI_INT, TARGET, 0, count, MPI_INT, win);
);

        TEST_FENCE_OP("Get", MPI_Get(rmabuf, count, MPI_INT, TARGET, 0, count, MPI_INT, win);
);
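
(The listing is excerpted; the TEST_FENCE_OP macro is defined elsewhere in the
test source.  A minimal sketch of what such a macro could look like, judging
only from the call sites above, where the operation is passed with a trailing
semicolon so its return value can be assigned: run the operation inside the
open fence epoch, close the epoch, and count any failures.  This is a
hypothetical reconstruction, not the original definition.)

/* Hypothetical sketch of TEST_FENCE_OP; assumes err, errs, and win in scope. */
#define TEST_FENCE_OP(op_name_, fence_op_)       \
    do {                                         \
        err = fence_op_                          \
        if (err) {                               \
            errs++;                              \
            if (errs < 10)                       \
                MTestPrintError(err);            \
        }                                        \
        err = MPI_Win_fence(0, win);             \
        if (err) {                               \
            errs++;                              \
            if (errs < 10)                       \
                MTestPrintError(err);            \
        }                                        \
    } while (0)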
Example #2
int main(int argc, char **argv)
{
    MPI_Datatype vec;
    MPI_Comm comm;
    double *vecin, *vecout;
    int minsize = 2, count;
    int root, i, n, stride, errs = 0;
    int rank, size;

    MTest_Init(&argc, &argv);

    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);

        for (root = 0; root < size; root++) {
            for (count = 1; count < 65000; count = count * 2) {
                n = 12;
                stride = 10;
                vecin = (double *) malloc(n * stride * size * sizeof(double));
                vecout = (double *) malloc(size * n * sizeof(double));

                MPI_Type_vector(n, 1, stride, MPI_DOUBLE, &vec);
                MPI_Type_commit(&vec);

                for (i = 0; i < n * stride; i++)
                    vecin[i] = -2;
                for (i = 0; i < n; i++)
                    vecin[i * stride] = rank * n + i;

                MPI_Gather(vecin, 1, vec, vecout, n, MPI_DOUBLE, root, comm);

                if (rank == root) {
                    for (i = 0; i < n * size; i++) {
                        if (vecout[i] != i) {
                            errs++;
                            if (errs < 10) {
                                fprintf(stderr, "vecout[%d]=%d\n", i, (int) vecout[i]);
                            }
                        }
                    }
                }
                MPI_Type_free(&vec);
                free(vecin);
                free(vecout);
            }
        }
        MTestFreeComm(&comm);
    }

    /* do a zero length gather */
    MPI_Gather(NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD);

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
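
(Note on the datatype matching above: one element of "vec" describes n doubles
taken at distance "stride" in the send buffer, and MPI matches type signatures
rather than layouts, so the root may receive each contribution as n contiguous
MPI_DOUBLEs.  A sketch of the layout for n = 3, stride = 10:)

/* MPI_Type_vector(3, 1, 10, MPI_DOUBLE, &vec) selects these elements:
 *
 *   vecin:  [x . . . . . . . . . x . . . . . . . . . x . . . . . . . . .]
 *            ^ index 0            ^ index 10           ^ index 20
 *
 * Sending count = 1 of "vec" thus transfers 3 doubles, which the root
 * receives into 3 consecutive slots of vecout. */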
Example #3
int main( int argc, char *argv[] )
{
    int errs = 0;
    int size;
    int minsize = 2, count;
    MPI_Comm      comm;
    int *buf, *bufout;
    MPI_Op op;
    MPI_Datatype mattype;

    MTest_Init( &argc, &argv );

    MPI_Op_create( uop, 0, &op );
    
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) {
	    continue;
	}
	MPI_Comm_size( comm, &size );
	matSize = size;  /* file-scope variable, used by the user-defined operation */

	/* Only one matrix for now */
	count = 1;

	/* A single matrix, the size of the communicator */
	MPI_Type_contiguous( size*size, MPI_INT, &mattype );
	MPI_Type_commit( &mattype );

	max_offset = count * size * size;
	buf = (int *)malloc( max_offset * sizeof(int) );
	if (!buf) {
	    MPI_Abort( MPI_COMM_WORLD, 1 );
	}
	bufout = (int *)malloc( max_offset * sizeof(int) );
	if (!bufout) {
	    MPI_Abort( MPI_COMM_WORLD, 1 );
	}

	initMat( comm, buf );
	MPI_Allreduce( buf, bufout, count, mattype, op, comm );
	errs += isIdentity( comm, bufout );

	/* Try the same test, but using MPI_IN_PLACE */
	initMat( comm, bufout );
	MPI_Allreduce( MPI_IN_PLACE, bufout, count, mattype, op, comm );
	errs += isIdentity( comm, bufout );

	free( buf );
	free( bufout );

	MPI_Type_free( &mattype );
	MTestFreeComm( &comm );
    }

    MPI_Op_free( &op );

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
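
(uop, initMat, isIdentity, matSize, and max_offset are defined elsewhere in
the test file; matSize is evidently a file-scope variable read by the
user-defined operation.  For reference, any function registered with
MPI_Op_create must have the MPI_User_function signature.  A minimal sketch
with a hypothetical element-wise body follows; the test's real uop presumably
multiplies matSize x matSize matrices, since the result is checked with
isIdentity:)

#include <mpi.h>

/* Signature required by MPI_Op_create; computes inout = in (op) inout for
 * *len elements of *datatype.  The body here is illustrative only. */
void uop(void *invec, void *inoutvec, int *len, MPI_Datatype *datatype)
{
    int i;
    int *in = (int *) invec;
    int *inout = (int *) inoutvec;
    for (i = 0; i < *len; i++)
        inout[i] += in[i];
}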
Example #4
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int rank, size;
    int *buf, bufsize;
    int *result;
    int *rmabuf, rsize, rcount;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Request req;

    MTest_Init(&argc, &argv);

    bufsize = 256 * sizeof(int);
    buf = (int *) malloc(bufsize);
    if (!buf) {
        fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    result = (int *) malloc(bufsize);
    if (!result) {
        fprintf(stderr, "Unable to allocated %d bytes\n", bufsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }
    rcount = 16;
    rsize = rcount * sizeof(int);
    rmabuf = (int *) malloc(rsize);
    if (!rmabuf) {
        fprintf(stderr, "Unable to allocated %d bytes\n", rsize);
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* The following illustrates the use of the routines to
     * run through a selection of communicators and datatypes.
     * Use subsets of these for tests that do not involve combinations
     * of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral(&comm, 1, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);

        MPI_Win_create(buf, bufsize, sizeof(int), MPI_INFO_NULL, comm, &win);
        /* To improve reporting of problems about operations, we
         * change the error handler to errors return */
        MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

        /** TEST OPERATIONS USING ACTIVE TARGET (FENCE) SYNCHRONIZATION **/
        MPI_Win_fence(0, win);

        TEST_FENCE_OP("Put",
                      MPI_Put(rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0, rcount, MPI_INT, win);
);

        TEST_FENCE_OP("Get",
                      MPI_Get(rmabuf, rcount, MPI_INT, MPI_PROC_NULL, 0, rcount, MPI_INT, win);
);
Example #5
int main(int argc, char *argv[])
{
    int errs = 0;
    int rank, size, source, dest;
    int minsize = 2, count;
    MPI_Comm comm;
    MTestDatatype sendtype, recvtype;

    MTest_Init(&argc, &argv);
    /* The following illustrates the use of the routines to
     * run through a selection of communicators and datatypes.
     * Use subsets of these for tests that do not involve combinations
     * of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        source = 0;
        dest = size - 1;

        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
                errs += test(comm, rank, source, dest, &sendtype, &recvtype);
                MTestFreeDatatype(&sendtype);
                MTestFreeDatatype(&recvtype);
            }
        }
        MTestFreeComm(&comm);
    }

    /* Part #2: simple large size test - contiguous and noncontiguous */
    if (sizeof(void *) > 4) {   /* Only if > 32-bit architecture */
        MPI_Comm_rank(MPI_COMM_WORLD, &rank);
        MPI_Comm_size(MPI_COMM_WORLD, &size);
        source = 0;
        dest = size - 1;

        MTestGetDatatypes(&sendtype, &recvtype, LARGE_CNT_CONTIG);
        errs += test(MPI_COMM_WORLD, rank, source, dest, &sendtype, &recvtype);

        do {
            MTestFreeDatatype(&sendtype);
            MTestFreeDatatype(&recvtype);
            MTestGetDatatypes(&sendtype, &recvtype, LARGE_CNT_NONCONTIG);
        } while (strstr(MTestGetDatatypeName(&sendtype), "vector") == NULL);
        errs += test(MPI_COMM_WORLD, rank, source, dest, &sendtype, &recvtype);
        MTestFreeDatatype(&sendtype);
        MTestFreeDatatype(&recvtype);
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Example #6
int main( int argc, char **argv )
{
    double *vecout;
    MPI_Comm comm;
    int    count, minsize = 2;
    int    i, errs = 0;
    int    rank, size;
    int    *displs, *recvcounts;

    MTest_Init( &argc, &argv );

    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );

	displs     = (int *)malloc( size * sizeof(int) );
	recvcounts = (int *)malloc( size * sizeof(int) );
	
        for (count = 1; count < 9000; count = count * 2) {
            vecout = (double *)malloc( size * count * sizeof(double) );
            
            for (i=0; i<count; i++) {
                vecout[rank*count+i] = rank*count+i;
            }
            for (i=0; i<size; i++) {
                recvcounts[i] = count;
                displs[i]    = i * count;
            }
            MPI_Allgatherv( MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, 
                            vecout, recvcounts, displs, MPI_DOUBLE, comm );
            for (i=0; i<count*size; i++) {
                if (vecout[i] != i) {
                    errs++;
                    if (errs < 10) {
                        fprintf( stderr, "vecout[%d]=%d\n",
                                 i, (int)vecout[i] );
                    }
                }
            }
            free( vecout );
        }

	free( displs );
	free( recvcounts );
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
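
(With MPI_IN_PLACE, MPI_Allgatherv ignores the send count and send type and
takes each rank's contribution from its own slot of the receive buffer; that
is why the deliberately invalid -1 and MPI_DATATYPE_NULL above are legal.  For
comparison, the equivalent call without MPI_IN_PLACE would use a separate send
buffer, since aliasing the send and receive buffers is not allowed.  A sketch,
reusing the variables of the loop above:)

/* Equivalent non-in-place call (sketch, same vecout/recvcounts/displs): */
double *myblock = (double *) malloc(count * sizeof(double));
for (i = 0; i < count; i++)
    myblock[i] = rank * count + i;
MPI_Allgatherv(myblock, count, MPI_DOUBLE,
               vecout, recvcounts, displs, MPI_DOUBLE, comm);
free(myblock);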
Example #7
int main( int argc, char *argv[] )
{
    MPI_Comm comm;
    MPI_Datatype dtype;
    int count, *bufin, *bufout, size, i, minsize=1, errs=0;

    MTest_Init( &argc, &argv );
    
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) {
	    continue;
	}
	MPI_Comm_size( comm, &size );
	count = size * 2;
	bufin = (int *)malloc( count * sizeof(int) );
	bufout = (int *)malloc( count * sizeof(int) );
	if (!bufin || !bufout) {
	    fprintf( stderr, "Unable to allocated space for buffers (%d)\n",
		     count );
	    MPI_Abort( MPI_COMM_WORLD, 1 );
	}
	for (i=0; i<count; i++) {
	    bufin[i] = i;
	    bufout[i] = -1;
	}

	dtype = MPI_INT;
	MPI_Allreduce( bufin, bufout, count, dtype, MPI_SUM, comm );
	/* Check output */
	for (i=0; i<count; i++) {
	    if (bufout[i] != i * size) {
		fprintf( stderr, "Expected bufout[%d] = %d but found %d\n",
			 i, i * size, bufout[i] );
		errs++;
	    }
	}
	free( bufin );
	free( bufout );
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #8
int main(int argc, char *argv[])
{
    int errs = 0;
    int rank, size;
    int minsize = 2, count;
    MPI_Comm comm;
    MPI_Op op;
    int *buf, i;

    MTest_Init(&argc, &argv);

    MPI_Op_create(mysum, 0, &op);

    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        MPI_Comm_size(comm, &size);
        MPI_Comm_rank(comm, &rank);

        for (count = 1; count < 65000; count = count * 2) {
            /* Contiguous data */
            buf = (int *) malloc(count * sizeof(int));
            for (i = 0; i < count; i++)
                buf[i] = rank + i;
            MPI_Allreduce(MPI_IN_PLACE, buf, count, MPI_INT, op, comm);
            /* Check the results */
            for (i = 0; i < count; i++) {
                int result = i * size + (size * (size - 1)) / 2;
                if (buf[i] != result) {
                    errs++;
                    if (errs < 10) {
                        fprintf(stderr, "buf[%d] = %d expected %d\n", i, buf[i], result);
                    }
                }
            }
            free(buf);
        }
        MTestFreeComm(&comm);
    }
    MPI_Op_free(&op);

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
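
(mysum, the user-defined operation registered above, is not shown in this
excerpt.  The expected value i*size + size*(size-1)/2 is the sum of rank + i
over all ranks, so mysum must behave like element-wise integer addition.  A
minimal sketch consistent with that check:)

#include <mpi.h>

/* Hypothetical sketch of mysum: element-wise integer sum. */
void mysum(void *invec, void *inoutvec, int *len, MPI_Datatype *dtype)
{
    int i;
    int *in = (int *) invec;
    int *inout = (int *) inoutvec;
    for (i = 0; i < *len; i++)
        inout[i] += in[i];
}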
Example #9
int main( int argc, char **argv )
{

    MPI_Comm comm;
    int      *sbuf, *rbuf;
    int      rank, size;
    int      *sendcounts, *recvcounts, *rdispls, *sdispls;
    int      i, *p, err;
    int      left, right, length;
    
    MTest_Init( &argc, &argv );
    err = 0;
    
    while (MTestGetIntracommGeneral( &comm, 2, 1 )) {
      if (comm == MPI_COMM_NULL) continue;

      MPI_Comm_size( comm, &size );
      MPI_Comm_rank( comm, &rank );
      
      if (size < 3) {
	  MTestFreeComm( &comm );
	  continue;
      }

      /* Create and load the arguments to alltoallv */
      sendcounts = (int *)malloc( size * sizeof(int) );
      recvcounts = (int *)malloc( size * sizeof(int) );
      rdispls    = (int *)malloc( size * sizeof(int) );
      sdispls    = (int *)malloc( size * sizeof(int) );
      if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
	fprintf( stderr, "Could not allocate arg items!\n" );
	MPI_Abort( comm, 1 );
        exit(1);
      }

      /* Get the neighbors */
      left  = (rank - 1 + size) % size;
      right = (rank + 1) % size;

      /* Set the defaults */
      for (i=0; i<size; i++) {
	  sendcounts[i] = 0;
	  recvcounts[i] = 0;
	  rdispls[i]    = 0;
	  sdispls[i]    = 0;
      }

      for (length=1; length < 66000; length = length*2+1 ) {
	  /* Get the buffers */
	  sbuf = (int *)malloc( 2 * length * sizeof(int) );
	  rbuf = (int *)malloc( 2 * length * sizeof(int) );
	  if (!sbuf || !rbuf) {
	      fprintf( stderr, "Could not allocate buffers!\n" );
	      MPI_Abort( comm, 1 );
              exit(1);
	  }
	  
	  /* Load up the buffers */
	  for (i=0; i<length; i++) {
	      sbuf[i]        = i + 100000*rank;
	      sbuf[i+length] = i + 100000*rank;
	      rbuf[i]        = -i;
	      rbuf[i+length] = -i-length;
	  }
	  sendcounts[left]  = length;
	  sendcounts[right] = length;
	  recvcounts[left]  = length;
	  recvcounts[right] = length;
	  rdispls[left]     = 0;
	  rdispls[right]    = length;
	  sdispls[left]     = 0;
	  sdispls[right]    = length;
      
	  MPI_Alltoallv( sbuf, sendcounts, sdispls, MPI_INT,
			 rbuf, recvcounts, rdispls, MPI_INT, comm );
      
	  /* Check rbuf */
	  p = rbuf;          /* left */

	  for (i=0; i<length; i++) {
	      if (p[i] != i + 100000 * left) {
		  if (err < 10) {
		      fprintf( stderr, "[%d from %d] got %d expected %d for %dth\n", 
			       rank, left, p[i], i + 100000 * left, i );
		  }
		  err++;
	      }
	  }

	  p = rbuf + length; /* right */
	  for (i=0; i<length; i++) {
	      if (p[i] != i + 100000 * right) {
		  if (err < 10) {
		      fprintf( stderr, "[%d from %d] got %d expected %d for %dth\n", 
			       rank, right, p[i], i + 100000 * right, i );
		  }
		  err++;
	      }
	  }

	  free( rbuf );
	  free( sbuf );
      }
	  
      free( sdispls );
      free( rdispls );
      free( recvcounts );
      free( sendcounts );
      MTestFreeComm( &comm );
    }

    MTest_Finalize( err );
    MPI_Finalize();
    return 0;
}
Example #10
int main( int argc, char *argv[] )
{
    int errs = 0;
    int rank, size, root;
    int minsize = 2, count; 
    MPI_Comm      comm;
    int *buf, *bufout;
    MPI_Op op;
    MPI_Datatype mattype;

    MTest_Init( &argc, &argv );

    MPI_Op_create( uop, 0, &op );
    
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;

	MPI_Comm_size( comm, &size );
	MPI_Comm_rank( comm, &rank );

	matSize = size;  /* used by the user-defined operation */
	/* Only one matrix for now */
	count = 1;

	/* A single matrix, the size of the communicator */
	MPI_Type_contiguous( size*size, MPI_INT, &mattype );
	MPI_Type_commit( &mattype );
	
	buf = (int *)malloc( count * size * size * sizeof(int) );
	if (!buf) MPI_Abort( MPI_COMM_WORLD, 1 );
	bufout = (int *)malloc( count * size * size * sizeof(int) );
	if (!bufout) MPI_Abort( MPI_COMM_WORLD, 1 );

	for (root = 0; root < size; root ++) {
	    initMat( comm, buf );
	    MPI_Reduce( buf, bufout, count, mattype, op, root, comm );
	    if (rank == root) {
		errs += isShiftLeft( comm, bufout );
	    }

	    /* Try the same test, but using MPI_IN_PLACE */
	    initMat( comm, bufout );
	    if (rank == root) {
		MPI_Reduce( MPI_IN_PLACE, bufout, count, mattype, op, root, comm );
	    }
	    else {
		MPI_Reduce( bufout, NULL, count, mattype, op, root, comm );
	    }
	    if (rank == root) {
		errs += isShiftLeft( comm, bufout );
	    }

#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
            /* Try one more time without IN_PLACE to make sure we check
             * aliasing correctly */
            if (rank == root) {
                MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
                if (MPI_SUCCESS == MPI_Reduce( bufout, bufout, count, mattype, op, root, comm ))
                    errs++;
            }
#endif
	}

	free( buf );
	free( bufout );
	
	MPI_Type_free( &mattype );

	MTestFreeComm( &comm );
    }

    MPI_Op_free( &op );

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #11
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MPI_Win       win;
    MPI_Aint      extent;
    MPI_Group     wingroup, neighbors;
    MTestDatatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	source = 0;
	dest   = size - 1;
	
	for (count = 1; count < 65000; count = count * 2) {
	    while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
		/* Make sure that everyone has a recv buffer */
		recvtype.InitBuf( &recvtype );

		MPI_Type_extent( recvtype.datatype, &extent );
		MPI_Win_create( recvtype.buf, recvtype.count * extent, 
				(int)extent, MPI_INFO_NULL, comm, &win );
		MPI_Win_get_group( win, &wingroup );
		if (rank == source) {
		    /* To improve reporting of problems about operations, we
		       change the error handler to errors return */
		    MPI_Win_set_errhandler( win, MPI_ERRORS_RETURN );
		    sendtype.InitBuf( &sendtype );
		    
		    /* Neighbor is dest only */
		    MPI_Group_incl( wingroup, 1, &dest, &neighbors );
		    err = MPI_Win_start( neighbors, 0, win );
		    if (err) {
			errs++;
			if (errs < 10) {
			    MTestPrintError( err );
			}
		    }
		    MPI_Group_free( &neighbors );
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				    sendtype.datatype, dest, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) {
			errs++;
			MTestPrintError( err );
		    }
		    err = MPI_Win_complete( win );
		    if (err) {
			errs++;
			if (errs < 10) {
			    MTestPrintError( err );
			}
		    }
		}
		else if (rank == dest) {
		    MPI_Group_incl( wingroup, 1, &source, &neighbors );
		    MPI_Win_post( neighbors, 0, win );
		    MPI_Group_free( &neighbors );
		    MPI_Win_wait( win );
		    /* This should have the same effect, in terms of
		       transferring data, as a send/recv pair */
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) {
			errs += err;
		    }
		}
		else {
		    /* Nothing; the other processes need not call any 
		       MPI routines */
		    ;
		}
		MPI_Win_free( &win );
		MTestFreeDatatype( &sendtype );
		MTestFreeDatatype( &recvtype );
		MPI_Group_free( &wingroup );
	    }
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #12
/*
 * Get an intracommunicator with at least min_size members.
 */
int MTestGetIntracomm(MPI_Comm * comm, int min_size)
{
    return MTestGetIntracommGeneral(comm, min_size, 0);
}
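
(A typical use of this wrapper, following the pattern of the other examples; a
sketch:)

/* Iterate over the suite's test communicators of at least two processes.
 * MTestGetIntracomm returns 0 when the sequence is exhausted and may yield
 * MPI_COMM_NULL entries, which callers skip. */
MPI_Comm comm;
while (MTestGetIntracomm(&comm, 2)) {
    if (comm == MPI_COMM_NULL)
        continue;
    /* ... run the test over comm ... */
    MTestFreeComm(&comm);
}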
Example #13
int main(int argc, char *argv[])
{
    int errs = 0;
    int rank, size, source, dest;
    unsigned char *buf, *bufp;
    int minsize = 2;
    int i, msgsize, bufsize, outsize;
    unsigned char *msg1, *msg2, *msg3;
    MPI_Comm comm;
    MPI_Status status1, status2, status3;

    MTest_Init(&argc, &argv);

    /* The following illustrates the use of the routines to
     * run through a selection of communicators and datatypes.
     * Use subsets of these for tests that do not involve combinations
     * of communicators, datatypes, and counts of datatypes */
    msgsize = 128 * 1024;
    msg1 = (unsigned char *) malloc(3 * msgsize);
    msg2 = msg1 + msgsize;
    msg3 = msg2 + msgsize;
    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        source = 0;
        dest = size - 1;

        /* Here is the test:  The sender */
        if (rank == source) {
            /* Get a bsend buffer.  Make it large enough that the Bsend
             * internals will (probably) not use an eager send for the data.
             * Have three such messages */
            bufsize = 3 * (MPI_BSEND_OVERHEAD + msgsize);
            buf = (unsigned char *) malloc(bufsize);
            if (!buf) {
                fprintf(stderr, "Unable to allocate a buffer of %d bytes\n", bufsize);
                MPI_Abort(MPI_COMM_WORLD, 1);
            }

            MPI_Buffer_attach(buf, bufsize);

            /* Initialize the buffers */
            for (i = 0; i < msgsize; i++) {
                msg1[i] = 0xff ^ (i & 0xff);
                msg2[i] = 0xff ^ (3 * i & 0xff);
                msg3[i] = 0xff ^ (5 * i & 0xff);
            }

            /* Initiate the bsends */
            MPI_Bsend(msg1, msgsize, MPI_UNSIGNED_CHAR, dest, 0, comm);
            MPI_Bsend(msg2, msgsize, MPI_UNSIGNED_CHAR, dest, 0, comm);
            MPI_Bsend(msg3, msgsize, MPI_UNSIGNED_CHAR, dest, 0, comm);

            /* Synchronize with our partner */
            MPI_Sendrecv(NULL, 0, MPI_UNSIGNED_CHAR, dest, 10,
                         NULL, 0, MPI_UNSIGNED_CHAR, dest, 10, comm, MPI_STATUS_IGNORE);

            /* Detach the buffers.  There should be pending operations */
            MPI_Buffer_detach(&bufp, &outsize);
            if (bufp != buf) {
                fprintf(stderr, "Wrong buffer returned\n");
                errs++;
            }
            if (outsize != bufsize) {
                fprintf(stderr, "Wrong buffer size returned\n");
                errs++;
            }
        }
        else if (rank == dest) {
            double tstart;

            /* Clear the message buffers */
            for (i = 0; i < msgsize; i++) {
                msg1[i] = 0;
                msg2[i] = 0;
                msg3[i] = 0;
            }

            /* Wait for the synchronize */
            MPI_Sendrecv(NULL, 0, MPI_UNSIGNED_CHAR, source, 10,
                         NULL, 0, MPI_UNSIGNED_CHAR, source, 10, comm, MPI_STATUS_IGNORE);

            /* Wait 2 seconds */
            tstart = MPI_Wtime();
            while (MPI_Wtime() - tstart < 2.0);

            /* Now receive the messages */
            MPI_Recv(msg1, msgsize, MPI_UNSIGNED_CHAR, source, 0, comm, &status1);
            MPI_Recv(msg2, msgsize, MPI_UNSIGNED_CHAR, source, 0, comm, &status2);
            MPI_Recv(msg3, msgsize, MPI_UNSIGNED_CHAR, source, 0, comm, &status3);

            /* Check that we have the correct data */
            for (i = 0; i < msgsize; i++) {
                if (msg1[i] != (0xff ^ (i & 0xff))) {
                    if (errs < 10) {
                        fprintf(stderr, "msg1[%d] = %d\n", i, msg1[i]);
                    }
                    errs++;
                }
                if (msg2[i] != (0xff ^ (3 * i & 0xff))) {
                    if (errs < 10) {
                        fprintf(stderr, "msg2[%d] = %d\n", i, msg2[i]);
                    }
                    errs++;
                }
                if (msg3[i] != (0xff ^ (5 * i & 0xff))) {
                    if (errs < 10) {
                        fprintf(stderr, "msg2[%d] = %d\n", i, msg2[i]);
                    }
                    errs++;
                }
            }

        }


        MTestFreeComm(&comm);
    }
    free(msg1);

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
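
(The buffer size 3 * (MPI_BSEND_OVERHEAD + msgsize) above follows the
standard's rule that an attached buffer must hold, for each pending buffered
send, the packed message plus MPI_BSEND_OVERHEAD bytes of bookkeeping.  A
sketch of the general computation; for typed data the payload bound should
come from MPI_Pack_size:)

#include <mpi.h>

/* Space needed to buffer n pending messages of count ints each (sketch). */
int bsend_space(int n, int count, MPI_Comm comm)
{
    int packed;
    MPI_Pack_size(count, MPI_INT, comm, &packed);
    return n * (packed + MPI_BSEND_OVERHEAD);
}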
Example #14
int main(int argc, char *argv[])
{
    int errs = 0;
    int rank, size, source;
    int minsize = 2, count, i;
    MPI_Comm comm;
    MPI_Win win;
    int *winbuf, *sbuf;

    MTest_Init(&argc, &argv);

    /* The following illustrates the use of the routines to
     * run through a selection of communicators and datatypes.
     * Use subsets of these for tests that do not involve combinations
     * of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        source = 0;

        for (count = 32768; count < 65000; count = count * 2) {

            /* We compare with an integer value that can be as large as
             * size * (count * count + (1/2)*(size-1))
             * For large machines (size large), this can exceed the
             * maximum integer for some large values of count.  We check
             * that in advance and break this loop if the above value
             * would exceed MAX_INT.  Specifically,
             *
             * size*count*count + (1/2)*size*(size-1) > MAX_INT
             * count*count > (MAX_INT/size - (1/2)*(size-1))
             */
            if (count * count > (MAX_INT / size - (size - 1) / 2))
                break;

            MPI_Alloc_mem(count * sizeof(int), MPI_INFO_NULL, &winbuf);
            MPI_Alloc_mem(count * sizeof(int), MPI_INFO_NULL, &sbuf);

            for (i = 0; i < count; i++)
                winbuf[i] = 0;
            for (i = 0; i < count; i++)
                sbuf[i] = rank + i * count;
            MPI_Win_create(winbuf, count * sizeof(int), sizeof(int), MPI_INFO_NULL, comm, &win);
            MPI_Win_fence(0, win);
            MPI_Accumulate(sbuf, count, MPI_INT, source, 0, count, MPI_INT, MPI_SUM, win);
            MPI_Win_fence(0, win);
            if (rank == source) {
                /* Check the results */
                for (i = 0; i < count; i++) {
                    int result = i * count * size + (size * (size - 1)) / 2;
                    if (winbuf[i] != result) {
                        if (errs < 10) {
                            fprintf(stderr,
                                    "Winbuf[%d] = %d, expected %d (count = %d, size = %d)\n", i,
                                    winbuf[i], result, count, size);
                        }
                        errs++;
                    }
                }
            }

            MPI_Win_free(&win);

            MPI_Free_mem(winbuf);
            MPI_Free_mem(sbuf);
        }
        MTestFreeComm(&comm);
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
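
(A worked instance of the overflow guard above, assuming MAX_INT is 2^31 - 1
and size = 4: the first count tried is 32768, and count*count = 1,073,741,824
exceeds MAX_INT/size - (size-1)/2 = 536,870,911 - 1 = 536,870,910, so the loop
breaks immediately.  Correctly so: the largest expected value,
(count-1)*count*size = 32767 * 32768 * 4 = 4,294,836,224 plus the small
rank-sum term, would overflow a 32-bit int.)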
Example #15
int main(int argc, char *argv[])
{
    int err, errs = 0;
    int rank, size, orig, target;
    int minsize = 2, count;
    int i, j;
    MPI_Aint origcount, targetcount;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Aint lb, extent;
    MPI_Datatype origtype, targettype;
    DTP_t orig_dtp, target_dtp;
    void *origbuf, *targetbuf;

    MTest_Init(&argc, &argv);

#ifndef USE_DTP_POOL_TYPE__STRUCT       /* set in 'test/mpi/structtypetest.txt' to split tests */
    MPI_Datatype basic_type;
    int len;
    char type_name[MPI_MAX_OBJECT_NAME] = { 0 };

    err = MTestInitBasicSignature(argc, argv, &count, &basic_type);
    if (err)
        return MTestReturnValue(1);

    err = DTP_pool_create(basic_type, count, &orig_dtp);
    if (err != DTP_SUCCESS) {
        MPI_Type_get_name(basic_type, type_name, &len);
        fprintf(stdout, "Error while creating orig pool (%s,%d)\n", type_name, count);
        fflush(stdout);
    }

    err = DTP_pool_create(basic_type, count, &target_dtp);
    if (err != DTP_SUCCESS) {
        MPI_Type_get_name(basic_type, type_name, &len);
        fprintf(stdout, "Error while creating target pool (%s,%d)\n", type_name, count);
        fflush(stdout);
    }
#else
    MPI_Datatype *basic_types = NULL;
    int *basic_type_counts = NULL;
    int basic_type_num;

    err = MTestInitStructSignature(argc, argv, &basic_type_num, &basic_type_counts, &basic_types);
    if (err)
        return MTestReturnValue(1);

    err = DTP_pool_create_struct(basic_type_num, basic_types, basic_type_counts, &orig_dtp);
    if (err != DTP_SUCCESS) {
        fprintf(stdout, "Error while creating struct pool\n");
        fflush(stdout);
    }

    err = DTP_pool_create_struct(basic_type_num, basic_types, basic_type_counts, &target_dtp);
    if (err != DTP_SUCCESS) {
        fprintf(stdout, "Error while creating struct pool\n");
        fflush(stdout);
    }

    /* this is ignored */
    count = 0;
#endif

    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;

        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        orig = 0;
        target = size - 1;

        for (i = 0; i < target_dtp->DTP_num_objs; i++) {
            err = DTP_obj_create(target_dtp, i, 0, 0, 0);
            if (err != DTP_SUCCESS) {
                errs++;
                break;
            }

            targetcount = target_dtp->DTP_obj_array[i].DTP_obj_count;
            targettype = target_dtp->DTP_obj_array[i].DTP_obj_type;
            targetbuf = target_dtp->DTP_obj_array[i].DTP_obj_buf;

            MPI_Type_get_extent(targettype, &lb, &extent);

            MPI_Win_create(targetbuf, lb + targetcount * extent,
                           (int) extent, MPI_INFO_NULL, comm, &win);

            for (j = 0; j < orig_dtp->DTP_num_objs; j++) {
                err = DTP_obj_create(orig_dtp, j, 0, 1, count);
                if (err != DTP_SUCCESS) {
                    errs++;
                    break;
                }

                origcount = orig_dtp->DTP_obj_array[j].DTP_obj_count;
                origtype = orig_dtp->DTP_obj_array[j].DTP_obj_type;
                origbuf = orig_dtp->DTP_obj_array[j].DTP_obj_buf;

                if (rank == orig) {
                    MPI_Win_lock(MPI_LOCK_SHARED, target, 0, win);
                    MPI_Accumulate(origbuf, origcount,
                                   origtype, target, 0, targetcount, targettype, MPI_REPLACE, win);
                    MPI_Win_unlock(target, win);
                    MPI_Barrier(comm);

                    char *resbuf = (char *) calloc(lb + extent * targetcount, sizeof(char));

                    /* wait for the destination to finish checking and reinitializing the buffer */
                    MPI_Barrier(comm);

                    MPI_Win_lock(MPI_LOCK_SHARED, target, 0, win);
                    MPI_Get_accumulate(origbuf, origcount,
                                       origtype, resbuf, targetcount, targettype,
                                       target, 0, targetcount, targettype, MPI_REPLACE, win);
                    MPI_Win_unlock(target, win);
                    MPI_Barrier(comm);
                    free(resbuf);
                } else if (rank == target) {
                    /* TODO: add a DTP_buf_set() function to replace this */
                    char *tmp = (char *) calloc(lb + extent * targetcount, sizeof(char));
                    memcpy(tmp, targetbuf, lb + extent * targetcount);

                    MPI_Barrier(comm);
                    MPI_Win_lock(MPI_LOCK_SHARED, target, 0, win);
                    err = DTP_obj_buf_check(target_dtp, i, 0, 1, count);
                    if (err != DTP_SUCCESS) {
                        errs++;
                    }
                    /* restore target buffer */
                    memcpy(targetbuf, tmp, lb + extent * targetcount);
                    free(tmp);

                    MPI_Win_unlock(target, win);

                    /* signal the source that checking and reinitialization is done */
                    MPI_Barrier(comm);

                    MPI_Barrier(comm);
                    MPI_Win_lock(MPI_LOCK_SHARED, target, 0, win);
                    err = DTP_obj_buf_check(target_dtp, i, 0, 1, count);
                    if (err != DTP_SUCCESS) {
                        errs++;
                    }
                    MPI_Win_unlock(target, win);
                }
                DTP_obj_free(orig_dtp, j);
            }
            MPI_Win_free(&win);
            DTP_obj_free(target_dtp, i);
        }
        MTestFreeComm(&comm);
    }
    DTP_pool_free(orig_dtp);
    DTP_pool_free(target_dtp);

#ifdef USE_DTP_POOL_TYPE__STRUCT
    /* cleanup array if any */
    if (basic_types) {
        free(basic_types);
    }
    if (basic_type_counts) {
        free(basic_type_counts);
    }
#endif

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Example #16
int main( int argc, char **argv )
{

    MPI_Comm comm;
    int      *sbuf, *rbuf;
    int      rank, size;
    int      *sendcounts, *recvcounts, *rdispls, *sdispls;
    int      i, j, *p, err;
    MPI_Datatype *sendtypes, *recvtypes;
    
    MTest_Init( &argc, &argv );
    err = 0;
    
    while (MTestGetIntracommGeneral( &comm, 2, 1 )) {
      if (comm == MPI_COMM_NULL) continue;

      /* Create the buffer */
      MPI_Comm_size( comm, &size );
      MPI_Comm_rank( comm, &rank );
      sbuf = (int *)malloc( size * size * sizeof(int) );
      rbuf = (int *)malloc( size * size * sizeof(int) );
      if (!sbuf || !rbuf) {
	fprintf( stderr, "Could not allocated buffers!\n" );
	MPI_Abort( comm, 1 );
      }
      
      /* Load up the buffers */
      for (i=0; i<size*size; i++) {
	sbuf[i] = i + 100*rank;
	rbuf[i] = -i;
      }
      
      /* Create and load the arguments to alltoallv */
      sendcounts = (int *)malloc( size * sizeof(int) );
      recvcounts = (int *)malloc( size * sizeof(int) );
      rdispls    = (int *)malloc( size * sizeof(int) );
      sdispls    = (int *)malloc( size * sizeof(int) );
      sendtypes    = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
      recvtypes    = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
      if (!sendcounts || !recvcounts || !rdispls || !sdispls || !sendtypes || !recvtypes) {
	fprintf( stderr, "Could not allocate arg items!\n" );
	MPI_Abort( comm, 1 );
      }
      /* Note that process 0 sends no data (sendcounts[0] = 0) */
      for (i=0; i<size; i++) {
	sendcounts[i] = i;
	recvcounts[i] = rank;
	rdispls[i]    = i * rank * sizeof(int);
	sdispls[i]    = (((i+1) * (i))/2) * sizeof(int);
        sendtypes[i] = recvtypes[i] = MPI_INT;
      }
      MPI_Alltoallw( sbuf, sendcounts, sdispls, sendtypes,
		     rbuf, recvcounts, rdispls, recvtypes, comm );
      
      /* Check rbuf */
      for (i=0; i<size; i++) {
	p = rbuf + rdispls[i]/sizeof(int);
	for (j=0; j<rank; j++) {
	  if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
	    fprintf( stderr, "[%d] got %d expected %d for %dth\n",
		     rank, p[j],(i*(i+1))/2 + j, j );
	    err++;
	  }
	}
      }

      free(sendtypes);
      free(sdispls);
      free(sendcounts);
      free(sbuf);

#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
      /* check MPI_IN_PLACE, added in MPI-2.2 */
      free( rbuf );
      rbuf = (int *)malloc( size * (2 * size) * sizeof(int) );
      if (!rbuf) {
        fprintf( stderr, "Could not reallocate rbuf!\n" );
        MPI_Abort( comm, 1 );
      }

      /* Load up the buffers */
      for (i = 0; i < size; i++) {
        /* alltoallw displs are in bytes, not in type extents */
        rdispls[i]    = i * (2 * size) * sizeof(int);
        recvtypes[i]  = MPI_INT;
        recvcounts[i] = i + rank;
      }
      memset(rbuf, -1, size * (2 * size) * sizeof(int));
      for (i=0; i < size; i++) {
        p = rbuf + (rdispls[i] / sizeof(int));
        for (j = 0; j < recvcounts[i]; ++j) {
          p[j] = 100 * rank + 10 * i + j;
        }
      }

      MPI_Alltoallw( MPI_IN_PLACE, NULL, NULL, NULL,
                     rbuf, recvcounts, rdispls, recvtypes, comm );

      /* Check rbuf */
      for (i=0; i<size; i++) {
        p = rbuf + (rdispls[i] / sizeof(int));
        for (j=0; j<recvcounts[i]; j++) {
          int expected = 100 * i + 10 * rank + j;
          if (p[j] != expected) {
            fprintf(stderr, "[%d] got %d expected %d for block=%d, element=%dth\n",
                    rank, p[j], expected, i, j);
            ++err;
          }
        }
      }
#endif

      free(recvtypes);
      free(rdispls);
      free(recvcounts);
      free(rbuf);
      MTestFreeComm( &comm );
    }

    MTest_Finalize( err );
    MPI_Finalize();
    return 0;
}
Example #17
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MPI_Status    status;
    MTestDatatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	source = 0;
	dest   = size - 1;

	/* To improve reporting of problems about operations, we
	   change the error handler to errors return */
	MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
	
	for (count = 1; count < 65000; count = count * 2) {
	    while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
		if (rank == source) {
		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Send( sendtype.buf, sendtype.count, 
				    sendtype.datatype, dest, 0, comm );
		    if (err) {
			errs++;
			MTestPrintError( err );
		    }
		    MTestFreeDatatype( &sendtype );
		}
		else if (rank == dest) {
		    recvtype.InitBuf( &recvtype );
		    err = MPI_Recv( recvtype.buf, recvtype.count, 
				    recvtype.datatype, source, 0, comm, &status );
		    if (err) {
			errs++;
			fprintf( stderr, "Error with communicator %s and datatype %s\n", 
				 MTestGetIntracommName(), 
				 MTestGetDatatypeName( &recvtype ) );
			MTestPrintError( err );
		    }
		    err = MTestCheckRecv( &status, &recvtype );
		    if (err) {
			errs += err;
		    }
		    MTestFreeDatatype( &recvtype );
		}
	    }
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #18
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count[2], nmsg, maxmsg;
    int i, j, len;
    MPI_Aint sendcount, recvcount;
    MPI_Comm comm;
    MPI_Datatype sendtype, recvtype;
    DTP_t send_dtp, recv_dtp;
    char send_name[MPI_MAX_OBJECT_NAME] = { 0 };
    char recv_name[MPI_MAX_OBJECT_NAME] = { 0 };
    void *sendbuf, *recvbuf;

    MTest_Init(&argc, &argv);

#ifndef USE_DTP_POOL_TYPE__STRUCT       /* set in 'test/mpi/structtypetest.txt' to split tests */
    MPI_Datatype basic_type;
    char type_name[MPI_MAX_OBJECT_NAME] = { 0 };

    err = MTestInitBasicPt2ptSignature(argc, argv, count, &basic_type);
    if (err)
        return MTestReturnValue(1);

    err = DTP_pool_create(basic_type, count[0], &send_dtp);
    if (err != DTP_SUCCESS) {
        MPI_Type_get_name(basic_type, type_name, &len);
        fprintf(stdout, "Error while creating send pool (%s,%d)\n", type_name, count[0]);
        fflush(stdout);
    }

    err = DTP_pool_create(basic_type, count[1], &recv_dtp);
    if (err != DTP_SUCCESS) {
        MPI_Type_get_name(basic_type, type_name, &len);
        fprintf(stdout, "Error while creating recv pool (%s,%d)\n", type_name, count[1]);
        fflush(stdout);
    }
#else
    MPI_Datatype *basic_types = NULL;
    int *basic_type_counts = NULL;
    int basic_type_num;

    err = MTestInitStructSignature(argc, argv, &basic_type_num, &basic_type_counts, &basic_types);
    if (err)
        return MTestReturnValue(1);

    err = DTP_pool_create_struct(basic_type_num, basic_types, basic_type_counts, &send_dtp);
    if (err != DTP_SUCCESS) {
        fprintf(stdout, "Error while creating struct pool\n");
        fflush(stdout);
    }

    err = DTP_pool_create_struct(basic_type_num, basic_types, basic_type_counts, &recv_dtp);
    if (err != DTP_SUCCESS) {
        fprintf(stdout, "Error while creating struct pool\n");
        fflush(stdout);
    }

    /* these are ignored */
    count[0] = 0;
    count[1] = 0;
#endif

    /* The following illustrates the use of the routines to
     * run through a selection of communicators and datatypes.
     * Use subsets of these for tests that do not involve combinations
     * of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        source = 0;
        dest = size - 1;

        /* To improve reporting of problems about operations, we
         * change the error handler to errors return */
        MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);

        for (i = 0; i < send_dtp->DTP_num_objs; i++) {
            err = DTP_obj_create(send_dtp, i, 0, 1, count[0]);
            if (err != DTP_SUCCESS) {
                errs++;
                break;
            }

            sendcount = send_dtp->DTP_obj_array[i].DTP_obj_count;
            sendtype = send_dtp->DTP_obj_array[i].DTP_obj_type;
            sendbuf = send_dtp->DTP_obj_array[i].DTP_obj_buf;

            for (j = 0; j < recv_dtp->DTP_num_objs; j++) {
                int nbytes;
                MPI_Type_size(sendtype, &nbytes);
                maxmsg = MAX_COUNT - count[0];

                err = DTP_obj_create(recv_dtp, j, 0, 0, 0);
                if (err != DTP_SUCCESS) {
                    errs++;
                    break;
                }

                recvcount = recv_dtp->DTP_obj_array[j].DTP_obj_count;
                recvtype = recv_dtp->DTP_obj_array[j].DTP_obj_type;
                recvbuf = recv_dtp->DTP_obj_array[j].DTP_obj_buf;

                /* We may want to limit the total message size sent */
                if (nbytes > MAX_MSG_SIZE) {
                    /* free the receive object created above before skipping */
                    DTP_obj_free(recv_dtp, j);
                    continue;
                }

                if (rank == source) {
                    MPI_Type_get_name(sendtype, send_name, &len);
                    MTestPrintfMsg(1, "Sending count = %d of sendtype %s of total size %d bytes\n",
                                   count[0], send_name, nbytes * count[0]);

                    for (nmsg = 1; nmsg < maxmsg; nmsg++) {
                        err = MPI_Send(sendbuf, sendcount, sendtype, dest, 0, comm);
                        if (err) {
                            errs++;
                            if (errs < 10) {
                                MTestPrintError(err);
                            }
                        }
                    }
                } else if (rank == dest) {
                    for (nmsg = 1; nmsg < maxmsg; nmsg++) {
                        err =
                            MPI_Recv(recvbuf, recvcount, recvtype, source, 0, comm,
                                     MPI_STATUS_IGNORE);
                        if (err) {
                            errs++;
                            if (errs < 10) {
                                MTestPrintError(err);
                            }
                        }

                        err = DTP_obj_buf_check(recv_dtp, j, 0, 1, count[0]);
                        if (err != DTP_SUCCESS) {
                            if (errs < 10) {
                                MPI_Type_get_name(sendtype, send_name, &len);
                                MPI_Type_get_name(recvtype, recv_name, &len);
                                fprintf(stdout,
                                        "Data in target buffer did not match for destination datatype %s and source datatype %s, count = %d, message iteration %d of %d\n",
                                        recv_name, send_name, count[0], nmsg, maxmsg);
                                fflush(stdout);
                            }
                            errs++;
                        }
                    }
                }
                DTP_obj_free(recv_dtp, j);
            }
            DTP_obj_free(send_dtp, i);
        }
        MTestFreeComm(&comm);
    }

    DTP_pool_free(send_dtp);
    DTP_pool_free(recv_dtp);

#ifdef USE_DTP_POOL_TYPE__STRUCT
    /* cleanup array if any */
    if (basic_types) {
        free(basic_types);
    }
    if (basic_type_counts) {
        free(basic_type_counts);
    }
#endif

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Example #19
int main(int argc, char *argv[])
{
    int errs = 0;
    int size, rank;
    int minsize = 2, count;
    MPI_Comm comm;
    int *buf, *bufout;
    MPI_Op op;
    MPI_Datatype mattype;
    int i;

    MTest_Init(&argc, &argv);

    MPI_Op_create(matmult, 0, &op);

    /* A single rotation matrix (3x3, stored as 9 consecutive elements) */
    MPI_Type_contiguous(9, MPI_INT, &mattype);
    MPI_Type_commit(&mattype);

    /* Sanity check: test that our routines work properly */
    {
        int one = 1;
        buf = (int *) malloc(4 * 9 * sizeof(int));
        initMat(0, 4, 0, &buf[0]);
        initMat(1, 4, 0, &buf[9]);
        initMat(2, 4, 0, &buf[18]);
        initMat(3, 4, 0, &buf[27]);
        matmult(&buf[0], &buf[9], &one, &mattype);
        matmult(&buf[9], &buf[18], &one, &mattype);
        matmult(&buf[18], &buf[27], &one, &mattype);
        checkResult(1, &buf[27], "Sanity Check");
        free(buf);
    }

    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;

        MPI_Comm_size(comm, &size);
        MPI_Comm_rank(comm, &rank);

        for (count = 1; count < size; count++) {

            /* Allocate the matrices */
            buf = (int *) malloc(count * 9 * sizeof(int));
            if (!buf) {
                MPI_Abort(MPI_COMM_WORLD, 1);
            }

            bufout = (int *) malloc(count * 9 * sizeof(int));
            if (!bufout) {
                MPI_Abort(MPI_COMM_WORLD, 1);
            }

            for (i = 0; i < count; i++) {
                initMat(rank, size, i, &buf[i * 9]);
            }

            MPI_Allreduce(buf, bufout, count, mattype, op, comm);
            errs += checkResult(count, bufout, "");

            /* Try the same test, but using MPI_IN_PLACE */
            for (i = 0; i < count; i++) {
                initMat(rank, size, i, &bufout[i * 9]);
            }
            MPI_Allreduce(MPI_IN_PLACE, bufout, count, mattype, op, comm);
            errs += checkResult(count, bufout, "IN_PLACE");

            free(buf);
            free(bufout);
        }
        MTestFreeComm(&comm);
    }

    MPI_Op_free(&op);
    MPI_Type_free(&mattype);

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
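
(matmult, initMat, and checkResult are defined elsewhere in the test file.
Judging from the sanity check, matmult composes 3x3 integer matrices and
stores the product back into its second argument, with the signature that
MPI_Op_create requires.  A hypothetical sketch:)

#include <mpi.h>

/* Hypothetical sketch of matmult: for each of *count 3x3 matrices,
 * replace inout with the product in * inout. */
void matmult(void *cinPtr, void *coutPtr, int *count, MPI_Datatype *dtype)
{
    const int *in = (const int *) cinPtr;
    int *inout = (int *) coutPtr;
    int c, i, j, k;

    for (c = 0; c < *count; c++) {
        int tmp[9];
        for (i = 0; i < 3; i++) {
            for (j = 0; j < 3; j++) {
                tmp[3 * i + j] = 0;
                for (k = 0; k < 3; k++)
                    tmp[3 * i + j] += in[9 * c + 3 * i + k] * inout[9 * c + 3 * k + j];
            }
        }
        for (i = 0; i < 9; i++)
            inout[9 * c + i] = tmp[i];
    }
}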
Example #20
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Aint extent;
    MTestDatatype sendtype, recvtype;

    MTest_Init(&argc, &argv);

    /* The following illustrates the use of the routines to
     * run through a selection of communicators and datatypes.
     * Use subsets of these for tests that do not involve combinations
     * of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        source = 0;
        dest = size - 1;

        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
                /* Make sure that everyone has a recv buffer */
                recvtype.InitBuf(&recvtype);

                MPI_Type_extent(recvtype.datatype, &extent);
                MPI_Win_create(recvtype.buf, recvtype.count * extent,
                               (int) extent, MPI_INFO_NULL, comm, &win);
                MPI_Win_fence(0, win);
                if (rank == source) {
                    sendtype.InitBuf(&sendtype);

                    /* To improve reporting of problems about operations, we
                     * change the error handler to errors return */
                    MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

                    /* MPI_REPLACE on accumulate is almost the same
                     * as MPI_Put; the only difference is in the
                     * handling of overlapping accumulate operations,
                     * which are not tested here */
                    err = MPI_Accumulate(sendtype.buf, sendtype.count,
                                         sendtype.datatype, dest, 0,
                                         recvtype.count, recvtype.datatype, MPI_REPLACE, win);
                    if (err) {
                        errs++;
                        if (errs < 10) {
                            printf("Accumulate types: send %s, recv %s\n",
                                   MTestGetDatatypeName(&sendtype),
                                   MTestGetDatatypeName(&recvtype));
                            MTestPrintError(err);
                        }
                    }
                    err = MPI_Win_fence(0, win);
                    if (err) {
                        errs++;
                        if (errs < 10) {
                            MTestPrintError(err);
                        }
                    }
                }
                else if (rank == dest) {
                    MPI_Win_fence(0, win);
                    /* This should have the same effect, in terms of
                     * transferring data, as a send/recv pair */
                    err = MTestCheckRecv(0, &recvtype);
                    if (err) {
                        errs += err;
                    }
                }
                else {
                    MPI_Win_fence(0, win);
                }
                MPI_Win_free(&win);
                MTestFreeDatatype(&sendtype);
                MTestFreeDatatype(&recvtype);
            }
        }
        MTestFreeComm(&comm);
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Example #21
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count, nmsg, maxmsg;
    MPI_Comm      comm;
    MTestDatatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
        if (comm == MPI_COMM_NULL) continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank( comm, &rank );
        MPI_Comm_size( comm, &size );
        source = 0;
        dest   = size - 1;

        /* To improve reporting of problems about operations, we
           change the error handler to errors return */
        MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );

        for (count = 1; count < MAX_COUNT; count = count * 2) {
            while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
                int nbytes;
                MPI_Type_size( sendtype.datatype, &nbytes );

                /* We may want to limit the total message size sent */
                if (nbytes > MAX_MSG_SIZE) {
                    /* Free the datatype objects before skipping this case */
                    MTestFreeDatatype( &sendtype );
                    MTestFreeDatatype( &recvtype );
                    continue;
                }
                maxmsg = MAX_COUNT - count;
                MTestPrintfMsg( 1, "Sending count = %d of sendtype %s of total size %d bytes\n",
                                count, MTestGetDatatypeName( &sendtype ),
                                nbytes*count );
                /* Make sure that everyone has a recv buffer */
                recvtype.InitBuf( &recvtype );

                if (rank == source) {
                    sendtype.InitBuf( &sendtype );

                    for (nmsg=1; nmsg<maxmsg; nmsg++) {
                        err = MPI_Send( sendtype.buf, sendtype.count,
                                        sendtype.datatype, dest, 0, comm);
                        if (err) {
                            errs++;
                            if (errs < 10) {
                                MTestPrintError( err );
                            }
                        }
                    }
                }
                else if (rank == dest) {
                    for (nmsg=1; nmsg<maxmsg; nmsg++) {
                        err = MPI_Recv( recvtype.buf, recvtype.count,
                                        recvtype.datatype, source, 0,
                                        comm, MPI_STATUS_IGNORE);
                        if (err) {
                            errs++;
                            if (errs < 10) {
                                MTestPrintError( err );
                            }
                        }

                        err = MTestCheckRecv( 0, &recvtype );
                        if (err) {
                            if (errs < 10) {
                                printf( "Data in target buffer did not match for destination datatype %s and source datatype %s, count = %d, message iteration %d of %d\n",
                                        MTestGetDatatypeName( &recvtype ),
                                        MTestGetDatatypeName( &sendtype ),
                                        count, nmsg, maxmsg );
                                recvtype.printErrors = 1;
                                (void)MTestCheckRecv( 0, &recvtype );
                            }
                            errs += err;
                        }
                    }
                }
                MTestFreeDatatype( &recvtype );
                MTestFreeDatatype( &sendtype );
            }
        }
        MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #22
int main(int argc, char *argv[])
{
    int errs = 0;
    int rank, size;
    int minsize = 2, count;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Aint lb, extent;
    MTestDatatype sendtype, recvtype;

    MTest_Init(&argc, &argv);

    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;

        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        int source = 0;

        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
                recvtype.printErrors = 1;
                recvtype.InitBuf(&recvtype);
                MPI_Type_get_extent(recvtype.datatype, &lb, &extent);

                MPI_Win_create(recvtype.buf, lb + recvtype.count * extent,
                               (int) extent, MPI_INFO_NULL, comm, &win);
                if (rank == source) {
                    int dest;
                    sendtype.InitBuf(&sendtype);

                    MPI_Win_lock_all(0, win);
                    for (dest = 0; dest < size; dest++)
                        if (dest != source) {
                            MPI_Accumulate(sendtype.buf, sendtype.count,
                                           sendtype.datatype, dest, 0,
                                           recvtype.count, recvtype.datatype, MPI_REPLACE, win);
                        }
                    MPI_Win_unlock_all(win);
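                    /* the unlock has completed the accumulates; tell the
                     * destinations they can lock and check their buffers */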
                    MPI_Barrier(comm);

                    char *resbuf = (char *) calloc(lb + extent * recvtype.count, sizeof(char));

                    /* wait for the destinations to finish checking and reinitializing the buffers */
                    MPI_Barrier(comm);

                    MPI_Win_lock_all(0, win);
                    for (dest = 0; dest < size; dest++)
                        if (dest != source) {
                            MPI_Get_accumulate(sendtype.buf, sendtype.count,
                                               sendtype.datatype, resbuf, recvtype.count,
                                               recvtype.datatype, dest, 0, recvtype.count,
                                               recvtype.datatype, MPI_REPLACE, win);

                        }
                    MPI_Win_unlock_all(win);
                    MPI_Barrier(comm);
                    free(resbuf);
                }
                else {
                    int err;
                    MPI_Barrier(comm);
                    MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
                    err = MTestCheckRecv(0, &recvtype);
                    if (err)
                        errs++;
                    recvtype.InitBuf(&recvtype);
                    MPI_Win_unlock(rank, win);

                    /* signal the source that checking and reinitialization is done */
                    MPI_Barrier(comm);

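                    /* wait for the source's get_accumulate pass to complete
                     * before checking the buffer a second time */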
                    MPI_Barrier(comm);
                    MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
                    err = MTestCheckRecv(0, &recvtype);
                    if (err)
                        errs++;
                    MPI_Win_unlock(rank, win);
                }

                MPI_Win_free(&win);
                MTestFreeDatatype(&sendtype);
                MTestFreeDatatype(&recvtype);
            }
        }
        MTestFreeComm(&comm);
    }
    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Example #23
int main( int argc, char *argv[] )
{
    int errs = 0;
    int rank, size;
    int minsize = 2, count; 
    MPI_Comm      comm;
    int *sendbuf, *recvbuf, *p;
    int sendcount, recvcount;
    int i, j;
    MPI_Datatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;

	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	
	/* printf( "Size of comm = %d\n", size ); */
	for (count = 1; count < 65000; count = count * 2) {
	    
	    /* Create a send buf and a receive buf suitable for testing
	       all to all.  */
	    sendcount = count;
	    recvcount = count;
	    sendbuf   = (int *)malloc( count * size * sizeof(int) );
	    recvbuf   = (int *)malloc( count * size * sizeof(int) );
	    sendtype  = MPI_INT;
	    recvtype  = MPI_INT;

	    if (!sendbuf || !recvbuf) {
		errs++;
		fprintf( stderr, "Failed to allocate sendbuf and/or recvbuf\n" );
		MPI_Abort( MPI_COMM_WORLD, 1 );
	    }
	    for (i=0; i<count*size; i++) 
		recvbuf[i] = -1;
	    p = sendbuf;
	    for (j=0; j<size; j++) {
		for (i=0; i<count; i++) {
		    *p++ = j * size + rank + i;
		}
	    }

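	    /* Block j of sendbuf is sent to rank j; after the alltoall,
	       block j of recvbuf holds the data received from rank j */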
	    MPI_Alltoall( sendbuf, sendcount, sendtype,
			  recvbuf, recvcount, recvtype, comm );

	    p = recvbuf;
	    for (j=0; j<size; j++) {
		for (i=0; i<count; i++) {
		    if (*p != rank * size + j + i) {
			errs++;
			if (errs < 10) {
			    fprintf( stderr, "Error with communicator %s and size=%d count=%d\n",
				     MTestGetIntracommName(), size, count );
			    fprintf( stderr, "recvbuf[%d,%d] = %d, should %d\n",
				     j,i, *p, rank * size + j + i );
			}
		    }
		    p++;
		}
	    }

#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
            /* check MPI_IN_PLACE, added in MPI-2.2 */
            p = recvbuf;
            for (j=0; j<size; j++) {
                for (i=0; i<count; i++) {
                    *p++ = j * size + rank + i;
                }
            }
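            /* With MPI_IN_PLACE the data is taken from and returned to
               recvbuf; the send count and type arguments are ignored */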
            MPI_Alltoall( MPI_IN_PLACE, 0/*ignored*/, MPI_INT/*ignored*/,
                          recvbuf, recvcount, recvtype, comm );
            p = recvbuf;
            for (j=0; j<size; j++) {
                for (i=0; i<count; i++) {
                    if (*p != rank * size + j + i) {
                        errs++;
                        if (errs < 10) {
                            fprintf( stderr, "Error (MPI_IN_PLACE) with communicator %s and size=%d count=%d\n",
                                     MTestGetIntracommName(), size, count );
                            fprintf(stderr, "recvbuf[%d,%d] = %d, should be %d\n",
                                    j,i, *p, rank * size + j + i );
                        }
                    }
                    p++;
                }
            }
#endif

	    free( recvbuf );
	    free( sendbuf );
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #24
int main( int argc, char *argv[] )
{
    int errs = 0;
    int rank, size;
    int minsize = 2, count; 
    int *sendbuf, *recvbuf, i;
    MPI_Comm      comm;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;

	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	
	for (count = 1; count < 65000; count = count * 2) {

	    sendbuf = (int *)malloc( count * sizeof(int) );
	    recvbuf = (int *)malloc( count * sizeof(int) );

	    for (i=0; i<count; i++) {
		sendbuf[i] = rank + i * size;
		recvbuf[i] = -1;
	    }
	    
	    MPI_Exscan( sendbuf, recvbuf, count, MPI_INT, MPI_SUM, comm );

	    /* Check the results.  rank 0 has no data */
	    if (rank > 0) {
		int result;
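		/* Exscan sums over ranks 0..rank-1, so
		   result = sum(k + i*size, k=0..rank-1)
		          = rank*i*size + rank*(rank-1)/2 */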
		for (i=0; i<count; i++) {
		    result = rank * i * size + ((rank) * (rank-1))/2;
		    if (recvbuf[i] != result) {
			errs++;
			if (errs < 10) {
			    fprintf( stderr, "Error in recvbuf[%d] = %d on %d, expected %d\n",
				     i, recvbuf[i], rank, result );
			}
		    }
		}
	    }

#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
            /* now try the MPI_IN_PLACE flavor */
            for (i=0; i<count; i++) {
                sendbuf[i] = -1; /* unused */
                recvbuf[i] = rank + i * size;
            }

            MPI_Exscan( MPI_IN_PLACE, recvbuf, count, MPI_INT, MPI_SUM, comm );

            /* Check the results.  rank 0's data must remain unchanged */
            for (i=0; i<count; i++) {
                int result;
                if (rank == 0)
                    result = rank + i * size;
                else
                    result = rank * i * size + ((rank) * (rank-1))/2;
                if (recvbuf[i] != result) {
                    errs++;
                    if (errs < 10) {
                        fprintf( stderr, "Error in recvbuf[%d] = %d on %d, expected %d\n",
                                 i, recvbuf[i], rank, result );
                    }
                }
            }

            MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
            /* Make sure that we check for buffer aliasing properly */
            if (MPI_SUCCESS == MPI_Exscan( recvbuf, recvbuf, count, MPI_INT, MPI_SUM, comm ))
                errs++;
#endif

	    free( sendbuf );
	    free( recvbuf );
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #25
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, root;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MTestDatatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	
	count = 1;
	MTEST_DATATYPE_FOR_EACH_COUNT(count) {

        /* To shorten test time, only run the default version of datatype tests
         * for comm world and run the minimum version for other communicators. */
        if (comm != MPI_COMM_WORLD) {
            MTestInitMinDatatypes();
        }

	    while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
		for (root=0; root<size; root++) {
		    if (rank == root) {
			sendtype.InitBuf( &sendtype );
			err = MPI_Bcast( sendtype.buf, sendtype.count,
					 sendtype.datatype, root, comm );
			if (err) {
			    errs++;
			    MTestPrintError( err );
			}
		    }
		    else {
			recvtype.InitBuf( &recvtype );
			err = MPI_Bcast( recvtype.buf, recvtype.count, 
				    recvtype.datatype, root, comm );
			if (err) {
			    errs++;
			    fprintf( stderr, "Error with communicator %s and datatype %s\n", 
				 MTestGetIntracommName(), 
				 MTestGetDatatypeName( &recvtype ) );
			    MTestPrintError( err );
			}
			err = MTestCheckRecv( 0, &recvtype );
			if (err) {
			    errs += err;
			}
		    }
		}
		MTestFreeDatatype( &recvtype );
		MTestFreeDatatype( &sendtype );
	    }
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #26
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, root;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MTestDatatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;

	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	
	/* To improve reporting of problems about operations, we
	   change the error handler to errors return */
	MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );

	/* The max value of count must be very large to ensure that we 
	   reach the long message algorithms */
	for (count = 1; count < 2800; count = count * 4) {
	    while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
		for (root=0; root<size; root++) {
		    if (rank == root) {
			sendtype.InitBuf( &sendtype );
			err = MPI_Bcast( sendtype.buf, sendtype.count,
					 sendtype.datatype, root, comm );
			if (err) {
			    errs++;
			    MTestPrintError( err );
			}
		    }
		    else {
			recvtype.InitBuf( &recvtype );
			err = MPI_Bcast( recvtype.buf, recvtype.count, 
				    recvtype.datatype, root, comm );
			if (err) {
			    errs++;
			    fprintf( stderr, "Error with communicator %s and datatype %s\n", 
				 MTestGetIntracommName(), 
				 MTestGetDatatypeName( &recvtype ) );
			    MTestPrintError( err );
			}
			err = MTestCheckRecv( 0, &recvtype );
			if (err) {
			    errs += err;
			}
		    }
		}
		MTestFreeDatatype( &recvtype );
		MTestFreeDatatype( &sendtype );
	    }
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #27
int main( int argc, char **argv )
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MPI_Win       win;
    MPI_Aint      lb, extent;
    MTestDatatype sendtype, recvtype;
    int           onlyInt = 0;

    MTest_Init( &argc, &argv );
    /* Check for a simple choice of communicator and datatypes */
    if (getenv( "MTEST_SIMPLE" )) onlyInt = 1;

    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	source = 0;
	dest   = size - 1;
	
	for (count = 1; count < 65000; count = count * 2) {
	    while (MTestGetDatatypes( &sendtype, &recvtype, count )) {

		MTestPrintfMsg( 1, 
		       "Putting count = %d of sendtype %s receive type %s\n", 
				count, MTestGetDatatypeName( &sendtype ),
				MTestGetDatatypeName( &recvtype ) );

		/* Make sure that everyone has a recv buffer */
		recvtype.InitBuf( &recvtype );

		MPI_Type_get_extent( recvtype.datatype, &lb, &extent );
		MPI_Win_create( recvtype.buf, recvtype.count * extent, 
				extent, MPI_INFO_NULL, comm, &win );
		/* To improve reporting of problems about operations, we
		   change the error handler to errors return */
		MPI_Win_set_errhandler( win, MPI_ERRORS_RETURN );

		/* At this point, we have all of the elements that we 
		   need to begin the multiple fence and put tests */
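		/* The NOPRECEDE/NOSUCCEED asserts promise that no RMA calls
		   precede the first fence or follow the last one, which lets
		   an implementation avoid unneeded synchronization */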
		/* Fence 1 */
		err = MPI_Win_fence( MPI_MODE_NOPRECEDE, win ); 
		if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		/* Source puts */
		if (rank == source) {
		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, dest, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		}

		/* Fence 2 */
		err = MPI_Win_fence( 0, win );
		if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		/* dest checks data, then Dest puts */
		if (rank == dest) {
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) { if (errs++ < MAX_PERR) { 
			    PrintRecvedError( "fence 2", &sendtype, &recvtype );
			}
		    }
		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, source, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		}

		/* Fence 3 */
		err = MPI_Win_fence( 0, win );
		if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		/* src checks data, then Src and dest puts*/
		if (rank == source) {
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) { if (errs++ < MAX_PERR) { 
			    PrintRecvedError( "fence 3", &sendtype, &recvtype );
			}
		    }
		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, dest, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		}
		if (rank == dest) {
		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, source, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		}

		/* Fence 4 */
		err = MPI_Win_fence( MPI_MODE_NOSUCCEED, win );
		if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		/* src and dest checks data */
		if (rank == source) {
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) { if (errs++ < MAX_PERR) { 
			    PrintRecvedError( "src fence4", &sendtype, &recvtype );
			}
		    }
		}
		if (rank == dest) {
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) { if (errs++ < MAX_PERR) { 
			    PrintRecvedError( "dest fence4", &sendtype, &recvtype );
			}
		    }
		}

		MPI_Win_free( &win );
		MTestFreeDatatype( &sendtype );
		MTestFreeDatatype( &recvtype );

		/* Only do one datatype in the simple case */
		if (onlyInt) break;
	    }
	    /* Only do one count in the simple case */
	    if (onlyInt) break;
	}
        MTestFreeComm(&comm);
	/* Only do one communicator in the simple case */
	if (onlyInt) break;
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Example #28
int main( int argc, char **argv )
{

    MPI_Comm comm;
    int      *sbuf, *rbuf;
    int      rank, size;
    int      *sendcounts, *recvcounts, *rdispls, *sdispls;
    int      i, j, *p, err;
    
    MTest_Init( &argc, &argv );
    err = 0;
    
    while (MTestGetIntracommGeneral( &comm, 2, 1 )) {
      if (comm == MPI_COMM_NULL) continue;

      /* Create the buffer */
      MPI_Comm_size( comm, &size );
      MPI_Comm_rank( comm, &rank );
      sbuf = (int *)malloc( size * size * sizeof(int) );
      rbuf = (int *)malloc( size * size * sizeof(int) );
      if (!sbuf || !rbuf) {
	fprintf( stderr, "Could not allocated buffers!\n" );
	MPI_Abort( comm, 1 );
      }
      
      /* Load up the buffers */
      for (i=0; i<size*size; i++) {
	sbuf[i] = i + 100*rank;
	rbuf[i] = -i;
      }
      
      /* Create and load the arguments to alltoallv */
      sendcounts = (int *)malloc( size * sizeof(int) );
      recvcounts = (int *)malloc( size * sizeof(int) );
      rdispls    = (int *)malloc( size * sizeof(int) );
      sdispls    = (int *)malloc( size * sizeof(int) );
      if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
	fprintf( stderr, "Could not allocate arg items!\n" );
	MPI_Abort( comm, 1 );
      }
      for (i=0; i<size; i++) {
	sendcounts[i] = i;
	recvcounts[i] = rank;
	rdispls[i]    = i * rank;
	sdispls[i]    = (i * (i+1))/2;
      }
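      /* Each rank sends i items to rank i (starting at offset i*(i+1)/2 in
         sbuf) and receives 'rank' items from each rank i at offset i*rank
         in rbuf */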
      MPI_Alltoallv( sbuf, sendcounts, sdispls, MPI_INT,
		     rbuf, recvcounts, rdispls, MPI_INT, comm );
      
      /* Check rbuf */
      for (i=0; i<size; i++) {
	p = rbuf + rdispls[i];
	for (j=0; j<rank; j++) {
	  if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
	    fprintf( stderr, "[%d] got %d expected %d for %dth\n",
		     rank, p[j],(i*(i+1))/2 + j, j );
	    err++;
	  }
	}
      }

      free( sdispls );
      free( sendcounts );
      free( sbuf );

#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
      /* check MPI_IN_PLACE, added in MPI-2.2 */
      free( rbuf );
      rbuf = (int *)malloc( size * (2 * size) * sizeof(int) );
      if (!rbuf) {
        fprintf( stderr, "Could not reallocate rbuf!\n" );
        MPI_Abort( comm, 1 );
      }

      /* Load up the buffers */
      for (i = 0; i < size; i++) {
        recvcounts[i] = i + rank;
        rdispls[i]    = i * (2 * size);
      }
      memset(rbuf, -1, size * (2 * size) * sizeof(int));
      for (i=0; i < size; i++) {
        p = rbuf + rdispls[i];
        for (j = 0; j < recvcounts[i]; ++j) {
          p[j] = 100 * rank + 10 * i + j;
        }
      }
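      /* In the MPI_IN_PLACE case the data sent is taken from rbuf using
         recvcounts and rdispls; the send arguments are ignored */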
      MPI_Alltoallv( MPI_IN_PLACE, NULL, NULL, MPI_INT,
                     rbuf, recvcounts, rdispls, MPI_INT, comm );
      /* Check rbuf */
      for (i=0; i<size; i++) {
        p = rbuf + rdispls[i];
        for (j=0; j<recvcounts[i]; j++) {
          int expected = 100 * i + 10 * rank + j;
          if (p[j] != expected) {
            fprintf(stderr, "[%d] got %d expected %d for block=%d, element=%dth\n",
                    rank, p[j], expected, i, j);
            ++err;
          }
        }
      }

      /* Check to make sure that aliasing is disallowed correctly */
      MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);
      if (MPI_SUCCESS == MPI_Alltoallv(rbuf, recvcounts, rdispls, MPI_INT,
                                       rbuf, recvcounts, rdispls, MPI_INT, comm))
          err++;
#endif

      free( rdispls );
      free( recvcounts );
      free( rbuf );
      MTestFreeComm( &comm );
    }

    MTest_Finalize( err );
    MPI_Finalize();
    return 0;
}
Example #29
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MPI_Win       win;
    MPI_Aint      lb, extent;
    MTestDatatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	source = 0;
	dest   = size - 1;
	
	for (count = 1; count < 65000; count = count * 2) {
	    while (MTestGetDatatypes( &sendtype, &recvtype, count )) {

		MTestPrintfMsg( 1, 
		       "Putting count = %d of sendtype %s receive type %s\n", 
				count, MTestGetDatatypeName( &sendtype ),
				MTestGetDatatypeName( &recvtype ) );

		/* Make sure that everyone has a recv buffer */
		recvtype.InitBuf( &recvtype );

		MPI_Type_get_extent( recvtype.datatype, &lb, &extent );
		MPI_Win_create( recvtype.buf, recvtype.count * extent, 
				extent, MPI_INFO_NULL, comm, &win );
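		/* The first fence starts the RMA access and exposure
		   epochs on every process in the window's group */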
		MPI_Win_fence( 0, win );
		if (rank == source) {
		    /* To improve reporting of problems about operations, we
		       change the error handler to errors return */
		    MPI_Win_set_errhandler( win, MPI_ERRORS_RETURN );

		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, dest, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) {
			errs++;
			if (errs < 10) {
			    MTestPrintError( err );
			}
		    }
		    err = MPI_Win_fence( 0, win );
		    if (err) {
			errs++;
			if (errs < 10) {
			    MTestPrintError( err );
			}
		    }
		}
		else if (rank == dest) {
		    MPI_Win_fence( 0, win );
		    /* This should have the same effect, in terms of
		       transferring data, as a send/recv pair */
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) {
			if (errs < 10) {
			    printf( "Data in target buffer did not match for destination datatype %s (put with source datatype %s)\n", 
				    MTestGetDatatypeName( &recvtype ),
				    MTestGetDatatypeName( &sendtype ) );
			    /* Redo the test, with the errors printed */
			    recvtype.printErrors = 1;
			    (void)MTestCheckRecv( 0, &recvtype );
			}
			errs += err;
		    }
		}
		else {
		    MPI_Win_fence( 0, win );
		}
		MPI_Win_free( &win );
		MTestFreeDatatype( &sendtype );
		MTestFreeDatatype( &recvtype );
	    }
	}
        MTestFreeComm(&comm);
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}