Code Example #1
File: lockall_dt.c  Project: NexMirror/MPICH
int main(int argc, char *argv[])
{
    int errs = 0;
    int rank, size;
    int minsize = 2, count;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Aint lb, extent;
    MTestDatatype sendtype, recvtype;

    MTest_Init(&argc, &argv);

    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;

        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        int source = 0;

        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
                recvtype.printErrors = 1;
                recvtype.InitBuf(&recvtype);
                MPI_Type_get_extent(recvtype.datatype, &lb, &extent);

                MPI_Win_create(recvtype.buf, lb + recvtype.count * extent,
                               (int) extent, MPI_INFO_NULL, comm, &win);
                if (rank == source) {
                    int dest;
                    sendtype.InitBuf(&sendtype);

                    MPI_Win_lock_all(0, win);
                    for (dest = 0; dest < size; dest++)
                        if (dest != source) {
                            MPI_Accumulate(sendtype.buf, sendtype.count,
                                           sendtype.datatype, dest, 0,
                                           recvtype.count, recvtype.datatype, MPI_REPLACE, win);
                        }
                    MPI_Win_unlock_all(win);
                    MPI_Barrier(comm);

                    char *resbuf = (char *) calloc(lb + extent * recvtype.count, sizeof(char));

                    /* wait for the destinations to finish checking and reinitializing the buffers */
                    MPI_Barrier(comm);

                    MPI_Win_lock_all(0, win);
                    for (dest = 0; dest < size; dest++)
                        if (dest != source) {
                            MPI_Get_accumulate(sendtype.buf, sendtype.count,
                                               sendtype.datatype, resbuf, recvtype.count,
                                               recvtype.datatype, dest, 0, recvtype.count,
                                               recvtype.datatype, MPI_REPLACE, win);

                        }
                    MPI_Win_unlock_all(win);
                    MPI_Barrier(comm);
                    free(resbuf);
                }
                else {
                    int err;
                    MPI_Barrier(comm);
                    MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
                    err = MTestCheckRecv(0, &recvtype);
                    if (err)
                        errs++;
                    recvtype.InitBuf(&recvtype);
                    MPI_Win_unlock(rank, win);

                    /* signal the source that checking and reinitialization is done */
                    MPI_Barrier(comm);

                    MPI_Barrier(comm);
                    MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
                    err = MTestCheckRecv(0, &recvtype);
                    if (err)
                        errs++;
                    MPI_Win_unlock(rank, win);
                }

                MPI_Win_free(&win);
                MTestFreeDatatype(&sendtype);
                MTestFreeDatatype(&recvtype);
            }
        }
        MTestFreeComm(&comm);
    }
    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
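The test above exercises MPI_Win_lock_all / MPI_Win_unlock_all passive-target epochs through the MTest datatype machinery. A minimal, self-contained sketch of the same access pattern with plain MPI_INT buffers follows; the buffer length N and the choice of rank 0 as the only origin are illustrative assumptions, not part of the test.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
    int rank, size, i, dest, errs = 0;
    const int N = 16;                 /* illustrative buffer length */
    int *winbuf, *src;
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Initialize the exposed buffer before the collective window creation */
    winbuf = (int *) malloc(N * sizeof(int));
    src = (int *) malloc(N * sizeof(int));
    for (i = 0; i < N; i++) {
        winbuf[i] = -1;
        src[i] = 100 + i;
    }
    MPI_Win_create(winbuf, N * sizeof(int), sizeof(int),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    if (rank == 0) {
        /* One lock_all epoch covers accesses to every target */
        MPI_Win_lock_all(0, win);
        for (dest = 1; dest < size; dest++)
            MPI_Accumulate(src, N, MPI_INT, dest, 0, N, MPI_INT,
                           MPI_REPLACE, win);
        MPI_Win_unlock_all(win);
    }
    MPI_Barrier(MPI_COMM_WORLD);      /* targets wait until the epoch is closed */

    if (rank != 0) {
        /* Lock our own window before reading it, as the test above does */
        MPI_Win_lock(MPI_LOCK_SHARED, rank, 0, win);
        for (i = 0; i < N; i++)
            if (winbuf[i] != 100 + i)
                errs++;
        MPI_Win_unlock(rank, win);
        if (errs)
            fprintf(stderr, "rank %d: %d mismatches\n", rank, errs);
    }

    MPI_Win_free(&win);
    free(winbuf);
    free(src);
    MPI_Finalize();
    return 0;
}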
Code Example #2
File: comm_idup_comm.c  Project: ParaStation/psmpi2
int main(int argc, char **argv)
{
    int errs = 0;
    int i;
    int rank, size;
    int *excl;
    int ranges[1][3];
    int isLeft, rleader;
    MPI_Group world_group, high_group, even_group;
    MPI_Comm local_comm, inter_comm, test_comm, outcomm;
    MPI_Comm idupcomms[NUM_IDUPS];
    MPI_Request reqs[NUM_IDUPS];

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    if (size < 2) {
        printf("this test requires at least 2 processes\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Idup MPI_COMM_WORLD multiple times */
    for (i = 0; i < NUM_IDUPS; i++) {
        MPI_Comm_idup(MPI_COMM_WORLD, &idupcomms[i], &reqs[i]);
    }

    /* Overlap pending idups with various comm generation functions */

    /* Comm_dup */
    MPI_Comm_dup(MPI_COMM_WORLD, &outcomm);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_split */
    MPI_Comm_split(MPI_COMM_WORLD, rank % 2, size - rank, &outcomm);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create, high half of MPI_COMM_WORLD */
    ranges[0][0] = size / 2;
    ranges[0][1] = size - 1;
    ranges[0][2] = 1;
    MPI_Group_range_incl(world_group, 1, ranges, &high_group);
    MPI_Comm_create(MPI_COMM_WORLD, high_group, &outcomm);
    MPI_Group_free(&high_group);
    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create_group, even ranks of MPI_COMM_WORLD */
    /* exclude the odd ranks */
    excl = malloc((size / 2) * sizeof(int));
    for (i = 0; i < size / 2; i++)
        excl[i] = (2 * i) + 1;

    MPI_Group_excl(world_group, size / 2, excl, &even_group);
    free(excl);

    if (rank % 2 == 0) {
        MPI_Comm_create_group(MPI_COMM_WORLD, even_group, 0, &outcomm);
    } else {
        outcomm = MPI_COMM_NULL;
    }
    MPI_Group_free(&even_group);

    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Intercomm_create & Intercomm_merge */
    MPI_Comm_split(MPI_COMM_WORLD, (rank < size / 2), rank, &local_comm);

    if (rank == 0) {
        rleader = size / 2;
    } else if (rank == size / 2) {
        rleader = 0;
    } else {
        rleader = -1;
    }
    isLeft = rank < size / 2;

    MPI_Intercomm_create(local_comm, 0, MPI_COMM_WORLD, rleader, 99, &inter_comm);
    MPI_Intercomm_merge(inter_comm, isLeft, &outcomm);
    MPI_Comm_free(&local_comm);

    errs += MTestTestComm(inter_comm);
    MTestFreeComm(&inter_comm);

    errs += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    MPI_Waitall(NUM_IDUPS, reqs, MPI_STATUSES_IGNORE);
    for (i = 0; i < NUM_IDUPS; i++) {
        errs += MTestTestComm(idupcomms[i]);
        MPI_Comm_free(&idupcomms[i]);
    }

    MPI_Group_free(&world_group);

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
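A stripped-down sketch of what comm_idup_comm.c exercises: several MPI_Comm_idup calls are left pending while another communicator is created, then completed with MPI_Waitall and used. NDUPS and the barrier used as a stand-in for MTestTestComm are assumptions for illustration.

#include <mpi.h>
#include <stdio.h>

#define NDUPS 4

int main(int argc, char **argv)
{
    int i, rank;
    MPI_Comm dups[NDUPS], split;
    MPI_Request reqs[NDUPS];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Start several nonblocking duplications */
    for (i = 0; i < NDUPS; i++)
        MPI_Comm_idup(MPI_COMM_WORLD, &dups[i], &reqs[i]);

    /* Overlap them with another (blocking) communicator creation */
    MPI_Comm_split(MPI_COMM_WORLD, rank % 2, rank, &split);
    MPI_Barrier(split);          /* the split communicator is usable right away */
    MPI_Comm_free(&split);

    /* Complete the pending duplications and use each result */
    MPI_Waitall(NDUPS, reqs, MPI_STATUSES_IGNORE);
    for (i = 0; i < NDUPS; i++) {
        MPI_Barrier(dups[i]);
        MPI_Comm_free(&dups[i]);
    }

    if (rank == 0)
        printf("idup overlap sketch completed\n");
    MPI_Finalize();
    return 0;
}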
Code Example #3
File: sendrecvt4.c  Project: adevress/MPICH-BlueGene
/*
   This program is derived from one in the MPICH-1 test suite

   This version sends and receives EVERYTHING from MPI_BOTTOM, by putting
   the data into a structure.
 */
int main( int argc, char **argv )
{
    MPI_Datatype *types;
    void         **inbufs, **outbufs;
    int          *counts, *bytesize, ntype;
    MPI_Comm     comm;
    int          ncomm = 20, rank, np, partner, tag, count;
    int          j, k, err, toterr, world_rank, errloc;
    MPI_Status   status;
    char         *obuf;
    MPI_Datatype offsettype;
    int          blen;
    MPI_Aint     displ, extent, natural_extent;
    char         myname[MPI_MAX_OBJECT_NAME];
    int          mynamelen;

    MTest_Init( &argc, &argv );

    MTestDatatype2Allocate( &types, &inbufs, &outbufs, &counts, &bytesize,
			    &ntype );
    MTestDatatype2Generate( types, inbufs, outbufs, counts, bytesize, &ntype );

    MPI_Comm_rank( MPI_COMM_WORLD, &world_rank );

    /* Test over a wide range of datatypes and communicators */
    err = 0;
    tag = 0;
    while (MTestGetIntracomm( &comm, 2 )) {
	if (comm == MPI_COMM_NULL) continue;
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &np );
	if (np < 2) continue;
	tag++;
	for (j=0; j<ntype; j++) {
	    MPI_Type_get_name( types[j], myname, &mynamelen );
	    if (world_rank == 0)
		MTestPrintfMsg( 10, "Testing type %s\n", myname );
	    if (rank == 0) {
		MPI_Get_address( inbufs[j], &displ );
		blen = 1;
		MPI_Type_create_struct( 1, &blen, &displ, types + j,
					&offsettype );
		MPI_Type_commit( &offsettype );
		/* Warning: if the type has an explicit MPI_UB, then using a
		   simple shift of the offset won't work.  For now, we skip
		   types whose extents are negative; the correct solution is
		   to add, where required, an explicit MPI_UB */
		MPI_Type_extent( offsettype, &extent );
		if (extent < 0) {
		    if (world_rank == 0)
			MTestPrintfMsg( 10,
			"... skipping (appears to have explicit MPI_UB)\n" );
		    MPI_Type_free( &offsettype );
		    continue;
		}
		MPI_Type_extent( types[j], &natural_extent );
		if (natural_extent != extent) {
		    MPI_Type_free( &offsettype );
		    continue;
		}
		partner = np - 1;
		MPI_Send( MPI_BOTTOM, counts[j], offsettype, partner, tag,
			  comm );
		MPI_Type_free( &offsettype );
            }
	    else if (rank == np-1) {
		partner = 0;
		obuf = outbufs[j];
		for (k=0; k<bytesize[j]; k++)
		    obuf[k] = 0;
		MPI_Get_address( outbufs[j], &displ );
		blen = 1;
		MPI_Type_create_struct( 1, &blen, &displ, types + j,
					&offsettype );
		MPI_Type_commit( &offsettype );
		/* Warning: if the type has an explicit MPI_UB, then using a
		   simple shift of the offset won't work.  For now, we skip
		   types whose extents are negative; the correct solution is
		   to add, where required, an explicit MPI_UB */
		MPI_Type_extent( offsettype, &extent );
		if (extent < 0) {
		    MPI_Type_free( &offsettype );
		    continue;
		}
		MPI_Type_extent( types[j], &natural_extent );
		if (natural_extent != extent) {
		    MPI_Type_free( &offsettype );
		    continue;
		}
		MPI_Recv( MPI_BOTTOM, counts[j], offsettype,
			  partner, tag, comm, &status );
		/* Test for correctness */
		MPI_Get_count( &status, types[j], &count );
		if (count != counts[j]) {
		    fprintf( stderr,
			"Error in counts (got %d expected %d) with type %s\n",
			 count, counts[j], myname );
		    err++;
                }
		if (status.MPI_SOURCE != partner) {
		    fprintf( stderr,
			"Error in source (got %d expected %d) with type %s\n",
			 status.MPI_SOURCE, partner, myname );
		    err++;
                }
		if ((errloc = MTestDatatype2Check( inbufs[j], outbufs[j],
						   bytesize[j] ))) {
		    fprintf( stderr,
                  "Error in data with type %s (type %d on %d) at byte %d\n",
			 myname, j, world_rank, errloc - 1 );
		    if (err < 10) {
			/* Give details on only the first 10 errors */
			unsigned char *in_p = (unsigned char *)inbufs[j],
			    *out_p = (unsigned char *)outbufs[j];
			int jj;
			jj = errloc - 1;
			jj &= 0xfffffffc; /* lop off a few bits */
			in_p += jj;
			out_p += jj;
			fprintf( stderr, "%02x%02x%02x%02x should be %02x%02x%02x%02x\n",
				 out_p[0], out_p[1], out_p[2], out_p[3],
				 in_p[0], in_p[1], in_p[2], in_p[3] );
		    }
		    err++;
                }
		MPI_Type_free( &offsettype );
            }
	}
	MTestFreeComm( &comm );
    }

    MTestDatatype2Free( types, inbufs, outbufs, counts, bytesize, ntype );
    MTest_Finalize( err );
    MPI_Finalize();
    return MTestReturnValue( err );
}
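The core trick in sendrecvt4.c is folding a buffer's absolute address into a one-member struct type so that the transfer can start at MPI_BOTTOM. A minimal sketch with a plain int array (assumes at least two processes; the array length and tag are arbitrary):

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, i;
    int data[4] = { 1, 2, 3, 4 }, recv[4] = { 0, 0, 0, 0 };
    int blen = 4;
    MPI_Aint displ;
    MPI_Datatype basetype = MPI_INT, offsettype;
    MPI_Status status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    if (rank == 0) {
        /* The struct's displacement is the absolute address of data */
        MPI_Get_address(data, &displ);
        MPI_Type_create_struct(1, &blen, &displ, &basetype, &offsettype);
        MPI_Type_commit(&offsettype);
        /* Sending from MPI_BOTTOM picks the data up at that address */
        MPI_Send(MPI_BOTTOM, 1, offsettype, 1, 0, MPI_COMM_WORLD);
        MPI_Type_free(&offsettype);
    } else if (rank == 1) {
        MPI_Get_address(recv, &displ);
        MPI_Type_create_struct(1, &blen, &displ, &basetype, &offsettype);
        MPI_Type_commit(&offsettype);
        MPI_Recv(MPI_BOTTOM, 1, offsettype, 0, 0, MPI_COMM_WORLD, &status);
        MPI_Type_free(&offsettype);
        for (i = 0; i < 4; i++)
            printf("recv[%d] = %d\n", i, recv[i]);
    }
    MPI_Finalize();
    return 0;
}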
Code Example #4
File: alltoallv0.c  Project: Julio-Anjos/simgrid
int main( int argc, char **argv )
{

    MPI_Comm comm;
    int      *sbuf, *rbuf;
    int      rank, size;
    int      *sendcounts, *recvcounts, *rdispls, *sdispls;
    int      i, *p, err;
    int      left, right, length;
    
    MTest_Init( &argc, &argv );
    err = 0;
    
    while (MTestGetIntracommGeneral( &comm, 2, 1 )) {
      if (comm == MPI_COMM_NULL) continue;

      MPI_Comm_size( comm, &size );
      MPI_Comm_rank( comm, &rank );
      
      if (size < 3) continue;

      /* Create and load the arguments to alltoallv */
      sendcounts = (int *)malloc( size * sizeof(int) );
      recvcounts = (int *)malloc( size * sizeof(int) );
      rdispls    = (int *)malloc( size * sizeof(int) );
      sdispls    = (int *)malloc( size * sizeof(int) );
      if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
	fprintf( stderr, "Could not allocate arg items!\n" );
	MPI_Abort( comm, 1 );
        exit(1);
      }

      /* Get the neighbors */
      left  = (rank - 1 + size) % size;
      right = (rank + 1) % size;

      /* Set the defaults */
      for (i=0; i<size; i++) {
	  sendcounts[i] = 0;
	  recvcounts[i] = 0;
	  rdispls[i]    = 0;
	  sdispls[i]    = 0;
      }

      for (length=1; length < 66000; length = length*2+1 ) {
	  /* Get the buffers */
	  sbuf = (int *)malloc( 2 * length * sizeof(int) );
	  rbuf = (int *)malloc( 2 * length * sizeof(int) );
	  if (!sbuf || !rbuf) {
	      fprintf( stderr, "Could not allocate buffers!\n" );
	      MPI_Abort( comm, 1 );
              exit(1);
	  }
	  
	  /* Load up the buffers */
	  for (i=0; i<length; i++) {
	      sbuf[i]        = i + 100000*rank;
	      sbuf[i+length] = i + 100000*rank;
	      rbuf[i]        = -i;
	      rbuf[i+length] = -i-length;
	  }
	  sendcounts[left]  = length;
	  sendcounts[right] = length;
	  recvcounts[left]  = length;
	  recvcounts[right] = length;
	  rdispls[left]     = 0;
	  rdispls[right]    = length;
	  sdispls[left]     = 0;
	  sdispls[right]    = length;
      
	  MPI_Alltoallv( sbuf, sendcounts, sdispls, MPI_INT,
			 rbuf, recvcounts, rdispls, MPI_INT, comm );
      
	  /* Check rbuf */
	  p = rbuf;          /* left */

	  for (i=0; i<length; i++) {
	      if (p[i] != i + 100000 * left) {
		  if (err < 10) {
		      fprintf( stderr, "[%d from %d] got %d expected %d for %dth\n", 
			       rank, left, p[i], i + 100000 * left, i );
		  }
		  err++;
	      }
	  }

	  p = rbuf + length; /* right */
	  for (i=0; i<length; i++) {
	      if (p[i] != i + 100000 * right) {
		  if (err < 10) {
		      fprintf( stderr, "[%d from %d] got %d expected %d for %dth\n", 
			       rank, right, p[i], i + 100000 * right, i );
		  }
		  err++;
	      }
	  }

	  free( rbuf );
	  free( sbuf );
      }
	  
      free( sdispls );
      free( rdispls );
      free( recvcounts );
      free( sendcounts );
      MTestFreeComm( &comm );
    }

    MTest_Finalize( err );
    MPI_Finalize();
    return 0;
}
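alltoallv0.c exchanges data only with the two ring neighbours and leaves every other count at zero. The same pattern in isolation could look like the sketch below; the message length, fill values, and use of MPI_COMM_WORLD are assumptions, and at least three processes are needed so that the left and right neighbours are distinct.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size, i, left, right, len = 8;
    int *scounts, *rcounts, *sdispls, *rdispls, *sbuf, *rbuf;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    if (size < 3) {               /* need distinct left/right neighbours */
        MPI_Finalize();
        return 0;
    }

    left = (rank - 1 + size) % size;
    right = (rank + 1) % size;

    /* counts default to 0 so only the two neighbours exchange data */
    scounts = (int *) calloc(size, sizeof(int));
    rcounts = (int *) calloc(size, sizeof(int));
    sdispls = (int *) calloc(size, sizeof(int));
    rdispls = (int *) calloc(size, sizeof(int));
    sbuf = (int *) malloc(2 * len * sizeof(int));
    rbuf = (int *) malloc(2 * len * sizeof(int));

    for (i = 0; i < 2 * len; i++)
        sbuf[i] = rank * 1000 + i;
    scounts[left] = rcounts[left] = len;
    scounts[right] = rcounts[right] = len;
    sdispls[right] = rdispls[right] = len;   /* left uses displacement 0 */

    MPI_Alltoallv(sbuf, scounts, sdispls, MPI_INT,
                  rbuf, rcounts, rdispls, MPI_INT, MPI_COMM_WORLD);

    /* rbuf[0..len) came from the left neighbour, rbuf[len..2*len) from the right */
    printf("rank %d: from left %d, from right %d\n", rank, rbuf[0], rbuf[len]);

    free(sbuf); free(rbuf);
    free(scounts); free(rcounts); free(sdispls); free(rdispls);
    MPI_Finalize();
    return 0;
}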
Code Example #5
File: exscan.c  Project: Julio-Anjos/simgrid
int main( int argc, char *argv[] )
{
    int errs = 0;
    int rank, size;
    int minsize = 2, count; 
    int *sendbuf, *recvbuf, i;
    MPI_Comm      comm;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;

	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	
	for (count = 1; count < 65000; count = count * 2) {

	    sendbuf = (int *)malloc( count * sizeof(int) );
	    recvbuf = (int *)malloc( count * sizeof(int) );

	    for (i=0; i<count; i++) {
		sendbuf[i] = rank + i * size;
		recvbuf[i] = -1;
	    }
	    
	    MPI_Exscan( sendbuf, recvbuf, count, MPI_INT, MPI_SUM, comm );

	    /* Check the results.  rank 0 has no data */
	    if (rank > 0) {
		int result;
		for (i=0; i<count; i++) {
		    result = rank * i * size + ((rank) * (rank-1))/2;
		    if (recvbuf[i] != result) {
			errs++;
			if (errs < 10) {
			    fprintf( stderr, "Error in recvbuf[%d] = %d on %d, expected %d\n",
				     i, recvbuf[i], rank, result );
			}
		    }
		}
	    }

#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
            /* now try the MPI_IN_PLACE flavor */
            for (i=0; i<count; i++) {
                sendbuf[i] = -1; /* unused */
                recvbuf[i] = rank + i * size;
            }

            MPI_Exscan( MPI_IN_PLACE, recvbuf, count, MPI_INT, MPI_SUM, comm );

            /* Check the results.  rank 0's data must remain unchanged */
            for (i=0; i<count; i++) {
                int result;
                if (rank == 0)
                    result = rank + i * size;
                else
                    result = rank * i * size + ((rank) * (rank-1))/2;
                if (recvbuf[i] != result) {
                    errs++;
                    if (errs < 10) {
                        fprintf( stderr, "Error in recvbuf[%d] = %d on %d, expected %d\n",
                                 i, recvbuf[i], rank, result );
                    }
                }
            }

            MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
            /* Make sure that we check for buffer aliasing properly */
            if (MPI_SUCCESS == MPI_Exscan( recvbuf, recvbuf, count, MPI_INT, MPI_SUM, comm ))
                errs++;
#endif

	    free( sendbuf );
	    free( recvbuf );
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
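For reference, the value checked in exscan.c is the exclusive prefix sum: rank r receives the combination of the contributions of ranks 0 through r-1, rank 0's receive buffer is not written, and with MPI_IN_PLACE rank 0's buffer is left as it was. A single-element sketch of those semantics:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    int rank, sendval, result = -1, expected;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Each rank contributes its own rank; rank r receives 0+1+...+(r-1) */
    sendval = rank;
    MPI_Exscan(&sendval, &result, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);

    if (rank > 0) {                 /* on rank 0 the receive buffer is undefined */
        expected = rank * (rank - 1) / 2;
        if (result != expected)
            fprintf(stderr, "rank %d: got %d, expected %d\n", rank, result, expected);
    }

    /* MPI_IN_PLACE variant (MPI-2.2): the contribution starts in the recv buffer */
    result = rank;
    MPI_Exscan(MPI_IN_PLACE, &result, 1, MPI_INT, MPI_SUM, MPI_COMM_WORLD);
    /* on rank 0, result is left unchanged; elsewhere it now holds the prefix sum */

    MPI_Finalize();
    return 0;
}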
Code Example #6
File: icgatherv.c  Project: Julio-Anjos/simgrid
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int *buf = 0;
    int *recvcounts;
    int *recvdispls;
    int leftGroup, i, count, rank, rsize, size;
    MPI_Comm comm;
    MPI_Datatype datatype;

    MTest_Init( &argc, &argv );

    datatype = MPI_INT;
    while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
	if (comm == MPI_COMM_NULL) continue;
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_remote_size( comm, &rsize );
	MPI_Comm_size( comm, &size );
		
	/* To improve reporting of problems about operations, we
	   change the error handler to errors return */
	MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );

	for (count = 1; count < 65000; count = 2 * count) {
	    /* Get an intercommunicator */
	    recvcounts = (int *)malloc( rsize * sizeof(int) );
	    recvdispls = (int *)malloc( rsize * sizeof(int) );
	    /* This simple test duplicates the Gather test, 
	       using the same lengths for all messages */
	    for (i=0; i<rsize; i++) {
		recvcounts[i] = count;
		recvdispls[i] = count * i;
	    }
	    if (leftGroup) {
		buf = (int *)malloc( count * rsize * sizeof(int) );
		for (i=0; i<count*rsize; i++) buf[i] = -1;

		err = MPI_Gatherv( NULL, 0, datatype,
				  buf, recvcounts, recvdispls, datatype, 
				 (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
				 comm );
		if (err) {
		    errs++;
		    MTestPrintError( err );
		}
		/* Test that no other process in this group received
		   any of the gathered data */
		if (rank != 0) {
		    for (i=0; i<count; i++) {
			if (buf[i] != -1) {
			    errs++;
			}
		    }
		}
		else {
		    /* Check for the correct data */
		    for (i=0; i<count*rsize; i++) {
			if (buf[i] != i) {
			    errs++;
			}
		    }
		}
	    }
	    else {
		/* In the right group */
		buf = (int *)malloc( count * sizeof(int) );
		for (i=0; i<count; i++) buf[i] = rank * count + i;
		err = MPI_Gatherv( buf, count, datatype, 
				   NULL, 0, 0, datatype, 0, comm );
		if (err) {
		    errs++;
		    MTestPrintError( err );
		}
	    }
	    free( buf );
	    free( recvcounts );
	    free( recvdispls );
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
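icgatherv.c depends on the intercommunicator form of a rooted collective: the root passes MPI_ROOT, the other members of its group pass MPI_PROC_NULL, and the remote group names the root by its rank in the root's group. A minimal sketch with MPI_Gather and one integer per sender (the split of MPI_COMM_WORLD into halves and the tag are assumptions; at least two processes are required):

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int wrank, wsize, rank, rsize, i, isLeft, rleader, sendval;
    int *recvbuf = NULL;
    MPI_Comm local, inter;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
    MPI_Comm_size(MPI_COMM_WORLD, &wsize);

    /* Split COMM_WORLD into two halves and join them with an intercommunicator */
    isLeft = wrank < wsize / 2;
    MPI_Comm_split(MPI_COMM_WORLD, isLeft, wrank, &local);
    rleader = isLeft ? wsize / 2 : 0;
    MPI_Intercomm_create(local, 0, MPI_COMM_WORLD, rleader, 7, &inter);

    MPI_Comm_rank(inter, &rank);
    MPI_Comm_remote_size(inter, &rsize);

    if (isLeft) {
        /* Receiving group: rank 0 is the root, the rest pass MPI_PROC_NULL */
        if (rank == 0)
            recvbuf = (int *) malloc(rsize * sizeof(int));
        MPI_Gather(NULL, 0, MPI_INT, recvbuf, 1, MPI_INT,
                   (rank == 0) ? MPI_ROOT : MPI_PROC_NULL, inter);
        if (rank == 0) {
            for (i = 0; i < rsize; i++)
                printf("from remote rank %d: %d\n", i, recvbuf[i]);
            free(recvbuf);
        }
    } else {
        /* Sending group: the root is named by its rank (0) in the remote group */
        sendval = 100 + rank;
        MPI_Gather(&sendval, 1, MPI_INT, NULL, 0, MPI_INT, 0, inter);
    }

    MPI_Comm_free(&inter);
    MPI_Comm_free(&local);
    MPI_Finalize();
    return 0;
}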
Code Example #7
File: epochtest.c  Project: Julio-Anjos/simgrid
int main( int argc, char **argv )
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MPI_Win       win;
    MPI_Aint      extent;
    MTestDatatype sendtype, recvtype;
    int           onlyInt = 0;

    MTest_Init( &argc, &argv );
    /* Check for a simple choice of communicator and datatypes */
    if (getenv( "MTEST_SIMPLE" )) onlyInt = 1;

    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	source = 0;
	dest   = size - 1;
	
	for (count = 1; count < 65000; count = count * 2) {
	    while (MTestGetDatatypes( &sendtype, &recvtype, count )) {

		MTestPrintfMsg( 1, 
		       "Putting count = %d of sendtype %s receive type %s\n", 
				count, MTestGetDatatypeName( &sendtype ),
				MTestGetDatatypeName( &recvtype ) );

		/* Make sure that everyone has a recv buffer */
		recvtype.InitBuf( &recvtype );

		MPI_Type_extent( recvtype.datatype, &extent );
		MPI_Win_create( recvtype.buf, recvtype.count * extent, 
				extent, MPI_INFO_NULL, comm, &win );
		/* To improve reporting of problems about operations, we
		   change the error handler to errors return */
		MPI_Win_set_errhandler( win, MPI_ERRORS_RETURN );

		/* At this point, we have all of the elements that we 
		   need to begin the multiple fence and put tests */
		/* Fence 1 */
		err = MPI_Win_fence( MPI_MODE_NOPRECEDE, win ); 
		if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		/* Source puts */
		if (rank == source) {
		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, dest, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		}

		/* Fence 2 */
		err = MPI_Win_fence( 0, win );
		if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		/* dest checks data, then Dest puts */
		if (rank == dest) {
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) { if (errs++ < MAX_PERR) { 
			    PrintRecvedError( "fence 2", &sendtype, &recvtype );
			}
		    }
		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, source, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		}

		/* Fence 3 */
		err = MPI_Win_fence( 0, win );
		if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		/* src checks data, then Src and dest puts*/
		if (rank == source) {
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) { if (errs++ < MAX_PERR) { 
			    PrintRecvedError( "fence 3", &sendtype, &recvtype );
			}
		    }
		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, dest, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		}
		if (rank == dest) {
		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, source, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		}

		/* Fence 4 */
		err = MPI_Win_fence( MPI_MODE_NOSUCCEED, win );
		if (err) { if (errs++ < MAX_PERR) MTestPrintError(err); }
		/* src and dest checks data */
		if (rank == source) {
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) { if (errs++ < MAX_PERR) { 
			    PrintRecvedError( "src fence4", &sendtype, &recvtype );
			}
		    }
		}
		if (rank == dest) {
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) { if (errs++ < MAX_PERR) { 
			    PrintRecvedError( "dest fence4", &sendtype, &recvtype );
			}
		    }
		}

		MPI_Win_free( &win );
		MTestFreeDatatype( &sendtype );
		MTestFreeDatatype( &recvtype );

		/* Only do one datatype in the simple case */
		if (onlyInt) break;
	    }
	    /* Only do one count in the simple case */
	    if (onlyInt) break;
	}
        MTestFreeComm(&comm);
	/* Only do one communicator in the simple case */
	if (onlyInt) break;
    }

    MTest_Finalize( errs );

    
    
    MPI_Finalize();
    return 0;
}
Code Example #8
File: putfence1.c  Project: Julio-Anjos/simgrid
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MPI_Win       win;
    MPI_Aint      extent;
    MTestDatatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	source = 0;
	dest   = size - 1;
	
	for (count = 1; count < 65000; count = count * 2) {
	    while (MTestGetDatatypes( &sendtype, &recvtype, count )) {

		MTestPrintfMsg( 1, 
		       "Putting count = %d of sendtype %s receive type %s\n", 
				count, MTestGetDatatypeName( &sendtype ),
				MTestGetDatatypeName( &recvtype ) );

		/* Make sure that everyone has a recv buffer */
		recvtype.InitBuf( &recvtype );

		MPI_Type_extent( recvtype.datatype, &extent );
		MPI_Win_create( recvtype.buf, recvtype.count * extent, 
				extent, MPI_INFO_NULL, comm, &win );
		MPI_Win_fence( 0, win );
		if (rank == source) {
		    /* To improve reporting of problems about operations, we
		       change the error handler to errors return */
		    MPI_Win_set_errhandler( win, MPI_ERRORS_RETURN );

		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				   sendtype.datatype, dest, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) {
			errs++;
			if (errs < 10) {
			    MTestPrintError( err );
			}
		    }
		    err = MPI_Win_fence( 0, win );
		    if (err) {
			errs++;
			if (errs < 10) {
			    MTestPrintError( err );
			}
		    }
		}
		else if (rank == dest) {
		    MPI_Win_fence( 0, win );
		    /* This should have the same effect, in terms of
		       transferring data, as a send/recv pair */
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) {
			if (errs < 10) {
			    printf( "Data in target buffer did not match for destination datatype %s (put with source datatype %s)\n", 
				    MTestGetDatatypeName( &recvtype ),
				    MTestGetDatatypeName( &sendtype ) );
			    /* Redo the test, with the errors printed */
			    recvtype.printErrors = 1;
			    (void)MTestCheckRecv( 0, &recvtype );
			}
			errs += err;
		    }
		}
		else {
		    MPI_Win_fence( 0, win );
		}
		MPI_Win_free( &win );
		MTestFreeDatatype( &sendtype );
		MTestFreeDatatype( &recvtype );
	    }
	}
        MTestFreeComm(&comm);
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
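epochtest.c and putfence1.c both rely on fence-delimited active-target epochs: a fence opens the epoch, MPI_Put transfers the data, and the next fence completes it on both sides. The bare pattern, with the datatype loops stripped away, might look like this (N and the rank 0 to last-rank direction are illustrative):

#include <mpi.h>
#include <stdio.h>

#define N 8

int main(int argc, char *argv[])
{
    int rank, size, i, errs = 0;
    int buf[N], src[N];
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    for (i = 0; i < N; i++) {
        buf[i] = -1;
        src[i] = i;
    }
    MPI_Win_create(buf, N * sizeof(int), sizeof(int),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    /* First fence opens the epoch on every process */
    MPI_Win_fence(MPI_MODE_NOPRECEDE, win);
    if (rank == 0 && size > 1)
        MPI_Put(src, N, MPI_INT, size - 1, 0, N, MPI_INT, win);
    /* Second fence completes the put; the target may now read its buffer */
    MPI_Win_fence(MPI_MODE_NOSUCCEED, win);

    if (rank == size - 1 && size > 1) {
        for (i = 0; i < N; i++)
            if (buf[i] != i)
                errs++;
        printf("rank %d: %d errors\n", rank, errs);
    }
    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}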
Code Example #9
File: bsendpending.c  Project: cemsbr/simgrid
int main( int argc, char *argv[] )
{
    int errs = 0;
    int rank, size, source, dest;
    unsigned char *buf, *bufp;
    int minsize = 2; 
    int i, msgsize, bufsize, outsize;
    unsigned char *msg1, *msg2, *msg3;
    MPI_Comm      comm;
    MPI_Status    status1, status2, status3;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    msgsize = 128 * 1024;
    msg1 = (unsigned char *)malloc( 3 * msgsize );
    msg2 = msg1 + msgsize;
    msg3 = msg2 + msgsize;
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	source = 0;
	dest   = size - 1;

	/* Here is the test:  The sender */
	if (rank == source) {
	    /* Get a bsend buffer.  Make it large enough that the Bsend
	       internals will (probably) not use a eager send for the data.
	       Have three such messages */
	    bufsize = 3 * (MPI_BSEND_OVERHEAD + msgsize);
	    buf     = (unsigned char *)malloc( bufsize );
	    if (!buf) {
		fprintf( stderr, "Unable to allocate a buffer of %d bytes\n",
			 bufsize );
		MPI_Abort( MPI_COMM_WORLD, 1 );
	    }
	    
	    MPI_Buffer_attach( buf, bufsize );

	    /* Initialize the buffers */
	    for (i=0; i<msgsize; i++) {
		msg1[i] = 0xff ^ (i & 0xff);
		msg2[i] = 0xff ^ (3*i & 0xff);
		msg3[i] = 0xff ^ (5*i & 0xff);
	    }

	    /* Initiate the bsends */
	    MPI_Bsend( msg1, msgsize, MPI_CHAR, dest, 0, comm );
	    MPI_Bsend( msg2, msgsize, MPI_CHAR, dest, 0, comm );
	    MPI_Bsend( msg3, msgsize, MPI_CHAR, dest, 0, comm );

	    /* Synchronize with our partner */
	    MPI_Sendrecv( 0, 0, MPI_CHAR, dest, 10, 
			  0, 0, MPI_CHAR, dest, 10, comm, MPI_STATUS_IGNORE );

	    /* Detach the buffers.  There should be pending operations */
	    MPI_Buffer_detach ( &bufp, &outsize );
	    if (bufp != buf) {
		fprintf( stderr, "Wrong buffer returned\n" );
		errs++;
	    }
	    if (outsize != bufsize) {
		fprintf( stderr, "Wrong buffer size returned\n" );
		errs++;
	    }
	}
	else if (rank == dest) {
	    double tstart;

	    /* Clear the message buffers */
	    for (i=0; i<msgsize; i++) {
		msg1[i] = 0;
		msg2[i] = 0;
		msg3[i] = 0;
	    }

	    /* Wait for the synchronize */
	    MPI_Sendrecv( 0, 0, MPI_CHAR, source, 10, 
			  0, 0, MPI_CHAR, source, 10, comm, MPI_STATUS_IGNORE );

	    /* Wait 2 seconds */
	    tstart = MPI_Wtime();
	    while (MPI_Wtime() - tstart < 2.0) ;

	    /* Now receive the messages */
	    MPI_Recv( msg1, msgsize, MPI_CHAR, source, 0, comm, &status1 );
	    MPI_Recv( msg2, msgsize, MPI_CHAR, source, 0, comm, &status2 );
	    MPI_Recv( msg3, msgsize, MPI_CHAR, source, 0, comm, &status3 );

	    /* Check that we have the correct data */
	    for (i=0; i<msgsize; i++) {
		if (msg1[i] != (0xff ^ (i & 0xff))) { 
		    if (errs < 10) {
			fprintf( stderr, "msg1[%d] = %d\n", i, msg1[i] );
		    }
		    errs++;
		}
		if (msg2[i] != (0xff ^ (3*i & 0xff))) {
		    if (errs < 10) {
			fprintf( stderr, "msg2[%d] = %d\n", i, msg2[i] );
		    }
		    errs++;
		}
		if (msg3[i] != (0xff ^ (5*i & 0xff))) {
		    if (errs < 10) {
			fprintf( stderr, "msg3[%d] = %d\n", i, msg3[i] );
		    }
		    errs++;
		}
	    }
	    
	}
		
	
	MTestFreeComm( &comm );
    }
    free( msg1 );

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
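The point of bsendpending.c is that MPI_Buffer_detach blocks until every buffered message has been delivered, and that the attached buffer must include MPI_BSEND_OVERHEAD bytes per pending message on top of the payload. A two-rank sketch of the attach / Bsend / detach sequence (the message size is arbitrary):

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

#define MSGSIZE (64 * 1024)

int main(int argc, char *argv[])
{
    int rank, size, i, bufsize, outsize;
    char *buf, *bufp, *msg;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    msg = (char *) malloc(MSGSIZE);
    for (i = 0; i < MSGSIZE; i++)
        msg[i] = i & 0xff;

    if (rank == 0 && size > 1) {
        /* Attach enough space for one buffered message plus MPI's overhead */
        bufsize = MSGSIZE + MPI_BSEND_OVERHEAD;
        buf = (char *) malloc(bufsize);
        MPI_Buffer_attach(buf, bufsize);

        MPI_Bsend(msg, MSGSIZE, MPI_CHAR, 1, 0, MPI_COMM_WORLD);

        /* Detach blocks until the buffered send has actually been delivered */
        MPI_Buffer_detach(&bufp, &outsize);
        free(buf);
    } else if (rank == 1) {
        MPI_Recv(msg, MSGSIZE, MPI_CHAR, 0, 0, MPI_COMM_WORLD, MPI_STATUS_IGNORE);
    }

    free(msg);
    MPI_Finalize();
    return 0;
}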
Code Example #10
File: accfence1.c  Project: Niharikareddy/mpich
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Aint extent;
    MTestDatatype sendtype, recvtype;

    MTest_Init(&argc, &argv);

    /* The following illustrates the use of the routines to
     * run through a selection of communicators and datatypes.
     * Use subsets of these for tests that do not involve combinations
     * of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        source = 0;
        dest = size - 1;

        MTEST_DATATYPE_FOR_EACH_COUNT(count) {
            while (MTestGetDatatypes(&sendtype, &recvtype, count)) {
                /* Make sure that everyone has a recv buffer */
                recvtype.InitBuf(&recvtype);

                MPI_Type_extent(recvtype.datatype, &extent);
                MPI_Win_create(recvtype.buf, recvtype.count * extent,
                               (int) extent, MPI_INFO_NULL, comm, &win);
                MPI_Win_fence(0, win);
                if (rank == source) {
                    sendtype.InitBuf(&sendtype);

                    /* To improve reporting of problems about operations, we
                     * change the error handler to errors return */
                    MPI_Win_set_errhandler(win, MPI_ERRORS_RETURN);

                    /* MPI_REPLACE on accumulate is almost the same
                     * as MPI_Put; the only difference is in the
                     * handling of overlapping accumulate operations,
                     * which are not tested here */
                    err = MPI_Accumulate(sendtype.buf, sendtype.count,
                                         sendtype.datatype, dest, 0,
                                         recvtype.count, recvtype.datatype, MPI_REPLACE, win);
                    if (err) {
                        errs++;
                        if (errs < 10) {
                            printf("Accumulate types: send %s, recv %s\n",
                                   MTestGetDatatypeName(&sendtype),
                                   MTestGetDatatypeName(&recvtype));
                            MTestPrintError(err);
                        }
                    }
                    err = MPI_Win_fence(0, win);
                    if (err) {
                        errs++;
                        if (errs < 10) {
                            MTestPrintError(err);
                        }
                    }
                }
                else if (rank == dest) {
                    MPI_Win_fence(0, win);
                    /* This should have the same effect, in terms of
                 * transferring data, as a send/recv pair */
                    err = MTestCheckRecv(0, &recvtype);
                    if (err) {
                        errs += err;
                    }
                }
                else {
                    MPI_Win_fence(0, win);
                }
                MPI_Win_free(&win);
                MTestFreeDatatype(&sendtype);
                MTestFreeDatatype(&recvtype);
            }
        }
        MTestFreeComm(&comm);
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Code Example #11
File: icbcast.c  Project: Julio-Anjos/simgrid
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int *buf = 0;
    int leftGroup, i, count, rank;
    MPI_Comm comm;
    MPI_Datatype datatype;

    MTest_Init( &argc, &argv );

    datatype = MPI_INT;
    /* Get an intercommunicator */
    while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
        if (comm == MPI_COMM_NULL)
            continue;

	MPI_Comm_rank( comm, &rank );

	/* To improve reporting of problems about operations, we
	   change the error handler to errors return */
	MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );

	for (count = 1; count < 65000; count = 2 * count) {
	    buf = (int *)malloc( count * sizeof(int) );
	    if (leftGroup) {
		if (rank == 0) {
		    for (i=0; i<count; i++) buf[i] = i;
		}
		else {
		    for (i=0; i<count; i++) buf[i] = -1;
		}
		err = MPI_Bcast( buf, count, datatype, 
				 (rank == 0) ? MPI_ROOT : MPI_PROC_NULL,
				 comm );
		if (err) {
		    errs++;
		    MTestPrintError( err );
		}
		/* Test that no other process in this group received the 
		   broadcast */
		if (rank != 0) {
		    for (i=0; i<count; i++) {
			if (buf[i] != -1) {
			    errs++;
			}
		    }
		}
	    }
	    else {
		/* In the right group */
		for (i=0; i<count; i++) buf[i] = -1;
		err = MPI_Bcast( buf, count, datatype, 0, comm );
		if (err) {
		    errs++;
		    MTestPrintError( err );
		}
		/* Check that we have received the correct data */
		for (i=0; i<count; i++) {
		    if (buf[i] != i) {
			errs++;
		    }
		}
	    }
	free( buf );
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Code Example #12
File: groupnullincl.c  Project: ParaStation/psmpi2
int main(int argc, char *argv[])
{
    int errs = 0;
    int rc, result;
    int ranks[1];
    MPI_Group group, outgroup;
    MPI_Comm comm;

    MTest_Init(&argc, &argv);
    /* To improve reporting of problems about operations, we
     * change the error handler to errors return */
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);

    while (MTestGetComm(&comm, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;

        MPI_Comm_group(comm, &group);
        rc = MPI_Group_incl(group, 0, 0, &outgroup);
        if (rc) {
            errs++;
            MTestPrintError(rc);
            printf("Error in creating an empty group with (0,0)\n");

            /* Some MPI implementations may reject a null "ranks" pointer */
            rc = MPI_Group_incl(group, 0, ranks, &outgroup);
            if (rc) {
                errs++;
                MTestPrintError(rc);
                printf("Error in creating an empty group with (0,ranks)\n");
            }
        }

        if (outgroup != MPI_GROUP_EMPTY) {
            /* Is the group equivalent to group empty? */
            rc = MPI_Group_compare(outgroup, MPI_GROUP_EMPTY, &result);
            if (result != MPI_IDENT) {
                errs++;
                MTestPrintError(rc);
                printf("Did not create a group equivalent to an empty group\n");
            }
        }
        rc = MPI_Group_free(&group);
        if (rc) {
            errs++;
            MTestPrintError(rc);
        }
        if (outgroup != MPI_GROUP_NULL) {
            rc = MPI_Group_free(&outgroup);
            if (rc) {
                errs++;
                MTestPrintError(rc);
            }
        }

        MTestFreeComm(&comm);
    }

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
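groupnullincl.c verifies that including zero ranks yields a group equivalent to MPI_GROUP_EMPTY. A minimal version of that check; the dummy ranks array mirrors the fallback the test uses for implementations that reject a null pointer:

#include <mpi.h>
#include <stdio.h>

int main(int argc, char *argv[])
{
    int result;
    int ranks[1] = { 0 };           /* contents are irrelevant when n == 0 */
    MPI_Group world_group, empty_group;

    MPI_Init(&argc, &argv);
    MPI_Comm_group(MPI_COMM_WORLD, &world_group);

    /* Including zero ranks should produce a group equivalent to MPI_GROUP_EMPTY */
    MPI_Group_incl(world_group, 0, ranks, &empty_group);
    MPI_Group_compare(empty_group, MPI_GROUP_EMPTY, &result);
    printf("compare with MPI_GROUP_EMPTY: %s\n",
           result == MPI_IDENT ? "MPI_IDENT" : "not identical");

    /* Only free the result if it is a distinct, non-predefined handle */
    if (empty_group != MPI_GROUP_NULL && empty_group != MPI_GROUP_EMPTY)
        MPI_Group_free(&empty_group);
    MPI_Group_free(&world_group);
    MPI_Finalize();
    return 0;
}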
Code Example #13
File: gather2.c  Project: NexMirror/MPICH
int main(int argc, char **argv)
{
    MPI_Datatype vec;
    double *vecin, *vecout;
    MPI_Comm comm;
    int count, minsize = 2;
    int root, i, n, stride, errs = 0;
    int rank, size;

    MTest_Init(&argc, &argv);

    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);

        for (root = 0; root < size; root++) {
            for (count = 1; count < 65000; count = count * 2) {
                n = 12;
                stride = 10;
                vecin = (double *) malloc(n * stride * size * sizeof(double));
                vecout = (double *) malloc(size * n * sizeof(double));

                MPI_Type_vector(n, 1, stride, MPI_DOUBLE, &vec);
                MPI_Type_commit(&vec);

                for (i = 0; i < n * stride; i++)
                    vecin[i] = -2;
                for (i = 0; i < n; i++)
                    vecin[i * stride] = rank * n + i;

                if (rank == root) {
                    for (i = 0; i < n; i++) {
                        vecout[rank * n + i] = rank * n + i;
                    }
                    MPI_Gather(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL,
                               vecout, n, MPI_DOUBLE, root, comm);
                }
                else {
                    MPI_Gather(vecin, 1, vec, NULL, -1, MPI_DATATYPE_NULL, root, comm);
                }
                if (rank == root) {
                    for (i = 0; i < n * size; i++) {
                        if (vecout[i] != i) {
                            errs++;
                            if (errs < 10) {
                                fprintf(stderr, "vecout[%d]=%d\n", i, (int) vecout[i]);
                            }
                        }
                    }
                }
                MPI_Type_free(&vec);
                free(vecin);
                free(vecout);
            }
        }
        MTestFreeComm(&comm);
    }

    /* do a zero length gather */
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    if (rank == 0) {
        MPI_Gather(MPI_IN_PLACE, -1, MPI_DATATYPE_NULL, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD);
    }
    else {
        MPI_Gather(NULL, 0, MPI_BYTE, NULL, 0, MPI_BYTE, 0, MPI_COMM_WORLD);
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
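gather2.c combines a strided send type with MPI_IN_PLACE at the root. The in-place part on its own, with one int per rank, might look like the following sketch (root 0 and the rank-valued payload are assumptions):

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    int rank, size, i, root = 0, val;
    int *recvbuf = NULL;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    val = rank;

    if (rank == root) {
        recvbuf = (int *) malloc(size * sizeof(int));
        /* With MPI_IN_PLACE the root's contribution must already sit in recvbuf */
        recvbuf[root] = root;
        MPI_Gather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
                   recvbuf, 1, MPI_INT, root, MPI_COMM_WORLD);
        for (i = 0; i < size; i++)
            printf("recvbuf[%d] = %d\n", i, recvbuf[i]);
        free(recvbuf);
    } else {
        /* Non-roots ignore the receive arguments entirely */
        MPI_Gather(&val, 1, MPI_INT, NULL, 0, MPI_DATATYPE_NULL,
                   root, MPI_COMM_WORLD);
    }

    MPI_Finalize();
    return 0;
}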
Code Example #14
int main( int argc, char *argv[] )
{
    int errs = 0;
    int rank, size;
    int minsize = 2, count; 
    MPI_Comm      comm;
    int *sendbuf, *recvbuf, *p;
    int sendcount, recvcount;
    int i, j;
    MPI_Datatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;

	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	
	/* printf( "Size of comm = %d\n", size ); */
	for (count = 1; count < 65000; count = count * 2) {
	    
	    /* Create a send buf and a receive buf suitable for testing
	       all to all.  */
	    sendcount = count;
	    recvcount = count;
	    sendbuf   = (int *)malloc( count * size * sizeof(int) );
	    recvbuf   = (int *)malloc( count * size * sizeof(int) );
	    sendtype  = MPI_INT;
	    recvtype  = MPI_INT;

	    if (!sendbuf || !recvbuf) {
		errs++;
		fprintf( stderr, "Failed to allocate sendbuf and/or recvbuf\n" );
		MPI_Abort( MPI_COMM_WORLD, 1 );
	    }
	    for (i=0; i<count*size; i++) 
		recvbuf[i] = -1;
	    p = sendbuf;
	    for (j=0; j<size; j++) {
		for (i=0; i<count; i++) {
		    *p++ = j * size + rank + i;
		}
	    }

	    MPI_Alltoall( sendbuf, sendcount, sendtype,
			  recvbuf, recvcount, recvtype, comm );

	    p = recvbuf;
	    for (j=0; j<size; j++) {
		for (i=0; i<count; i++) {
		    if (*p != rank * size + j + i) {
			errs++;
			if (errs < 10) {
			    fprintf( stderr, "Error with communicator %s and size=%d count=%d\n",
				     MTestGetIntracommName(), size, count );
			    fprintf( stderr, "recvbuf[%d,%d] = %d, should %d\n",
				     j,i, *p, rank * size + j + i );
			}
		    }
		    p++;
		}
	    }

#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
            /* check MPI_IN_PLACE, added in MPI-2.2 */
            p = recvbuf;
            for (j=0; j<size; j++) {
                for (i=0; i<count; i++) {
                    *p++ = j * size + rank + i;
                }
            }
            MPI_Alltoall( MPI_IN_PLACE, 0/*ignored*/, MPI_INT/*ignored*/,
                          recvbuf, recvcount, recvtype, comm );
            p = recvbuf;
            for (j=0; j<size; j++) {
                for (i=0; i<count; i++) {
                    if (*p != rank * size + j + i) {
                        errs++;
                        if (errs < 10) {
                            fprintf( stderr, "Error (MPI_IN_PLACE) with communicator %s and size=%d count=%d\n",
                                     MTestGetIntracommName(), size, count );
                            fprintf(stderr, "recvbuf[%d,%d] = %d, should be %d\n",
                                    j,i, *p, rank * size + j + i );
                        }
                    }
                    p++;
                }
            }
#endif

	    free( recvbuf );
	    free( sendbuf );
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
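The MPI_IN_PLACE branch above exchanges blocks inside the receive buffer itself, and the send count and type are ignored. A compact sketch of just that MPI-2.2 variant (the block size and fill values are arbitrary):

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char *argv[])
{
    int rank, size, i, j, count = 2, errs = 0;
    int *buf;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* buf holds one block of 'count' ints destined for each rank */
    buf = (int *) malloc(count * size * sizeof(int));
    for (i = 0; i < count * size; i++)
        buf[i] = rank * 1000 + i;

    /* In-place alltoall (MPI-2.2): the send arguments are ignored */
    MPI_Alltoall(MPI_IN_PLACE, 0, MPI_INT, buf, count, MPI_INT, MPI_COMM_WORLD);

    /* Block j now holds what rank j had stored in its block for this rank */
    for (j = 0; j < size; j++)
        for (i = 0; i < count; i++)
            if (buf[j * count + i] != j * 1000 + rank * count + i)
                errs++;
    if (errs)
        fprintf(stderr, "rank %d: %d mismatches\n", rank, errs);

    free(buf);
    MPI_Finalize();
    return 0;
}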
Code Example #15
File: template.c  Project: abhinavvishnu/matex
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MPI_Status    status;
    MTestDatatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	source = 0;
	dest   = size - 1;

	/* To improve reporting of problems about operations, we
	   change the error handler to errors return */
	MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );
	
	for (count = 1; count < 65000; count = count * 2) {
	    while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
		if (rank == source) {
		    sendtype.InitBuf( &sendtype );
		    
		    err = MPI_Send( sendtype.buf, sendtype.count, 
				    sendtype.datatype, dest, 0, comm );
		    if (err) {
			errs++;
			MTestPrintError( err );
		    }
		    MTestFreeDatatype( &sendtype );
		}
		else if (rank == dest) {
		    recvtype.InitBuf( &recvtype );
		    err = MPI_Recv( recvtype.buf, recvtype.count, 
				    recvtype.datatype, source, 0, comm, &status );
		    if (err) {
			errs++;
			fprintf( stderr, "Error with communicator %s and datatype %s\n", 
				 MTestGetIntracommName(), 
				 MTestGetDatatypeName( &recvtype ) );
			MTestPrintError( err );
		    }
		    err = MTestCheckRecv( &status, &recvtype );
		    if (err) {
			errs += err;
		    }
		    MTestFreeDatatype( &recvtype );
		}
	    }
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Code Example #16
int main( int argc, char **argv )
{
    MPI_Comm comm;
    int      *sbuf, *rbuf;
    int      rank, size, lsize, asize;
    int      *sendcounts, *recvcounts, *rdispls, *sdispls;
    int      i, j, *p, err;
    int      leftGroup;

    MTest_Init( &argc, &argv );
    err = 0;

    while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
      if (comm == MPI_COMM_NULL) continue;

      /* Create the buffer */
      MPI_Comm_size( comm, &lsize );
      MPI_Comm_remote_size( comm, &size );
      asize = (lsize > size) ? lsize : size;
      MPI_Comm_rank( comm, &rank );
      sbuf = (int *)malloc( size * size * sizeof(int) );
      rbuf = (int *)malloc( asize * asize * sizeof(int) );
      if (!sbuf || !rbuf) {
	fprintf( stderr, "Could not allocate buffers!\n" );
	MPI_Abort( comm, 1 );
      }

      /* Load up the buffers */
      for (i=0; i<size*size; i++) {
	sbuf[i] = i + 100*rank;
	rbuf[i] = -i;
      }

      /* Create and load the arguments to alltoallv */
      sendcounts = (int *)malloc( size * sizeof(int) );
      recvcounts = (int *)malloc( size * sizeof(int) );
      rdispls    = (int *)malloc( size * sizeof(int) );
      sdispls    = (int *)malloc( size * sizeof(int) );
      if (!sendcounts || !recvcounts || !rdispls || !sdispls) {
	fprintf( stderr, "Could not allocate arg items!\n" );
	MPI_Abort( comm, 1 );
      }
      for (i=0; i<size; i++) {
	sendcounts[i] = i;
	sdispls[i]    = (i * (i+1))/2;
	recvcounts[i] = rank;
	rdispls[i] = i * rank;
      }
      MPI_Alltoallv( sbuf, sendcounts, sdispls, MPI_INT,
		     rbuf, recvcounts, rdispls, MPI_INT, comm );

      /* Check rbuf */
      for (i=0; i<size; i++) {
	p = rbuf + rdispls[i];
	for (j=0; j<rank; j++) {
	  if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
	    fprintf( stderr, "[%d] got %d expected %d for %dth\n",
		     rank, p[j], i * 100 + (rank*(rank+1))/2 + j, j );
	    err++;
	  }
	}
      }

      free( sdispls );
      free( rdispls );
      free( recvcounts );
      free( sendcounts );
      free( rbuf );
      free( sbuf );
      MTestFreeComm( &comm );
    }

    MTest_Finalize( err );
    MPI_Finalize();
    return 0;
}
Code Example #17
File: lock_dt.c  Project: jeffhammond/mpich
int main(int argc, char *argv[])
{
    int err, errs = 0;
    int rank, size, orig, target;
    int minsize = 2, count;
    int i, j;
    MPI_Aint origcount, targetcount;
    MPI_Comm comm;
    MPI_Win win;
    MPI_Aint lb, extent;
    MPI_Datatype origtype, targettype;
    DTP_t orig_dtp, target_dtp;
    void *origbuf, *targetbuf;

    MTest_Init(&argc, &argv);

#ifndef USE_DTP_POOL_TYPE__STRUCT       /* set in 'test/mpi/structtypetest.txt' to split tests */
    MPI_Datatype basic_type;
    int len;
    char type_name[MPI_MAX_OBJECT_NAME] = { 0 };

    err = MTestInitBasicSignature(argc, argv, &count, &basic_type);
    if (err)
        return MTestReturnValue(1);

    err = DTP_pool_create(basic_type, count, &orig_dtp);
    if (err != DTP_SUCCESS) {
        MPI_Type_get_name(basic_type, type_name, &len);
        fprintf(stdout, "Error while creating orig pool (%s,%d)\n", type_name, count);
        fflush(stdout);
    }

    err = DTP_pool_create(basic_type, count, &target_dtp);
    if (err != DTP_SUCCESS) {
        MPI_Type_get_name(basic_type, type_name, &len);
        fprintf(stdout, "Error while creating target pool (%s,%d)\n", type_name, count);
        fflush(stdout);
    }
#else
    MPI_Datatype *basic_types = NULL;
    int *basic_type_counts = NULL;
    int basic_type_num;

    err = MTestInitStructSignature(argc, argv, &basic_type_num, &basic_type_counts, &basic_types);
    if (err)
        return MTestReturnValue(1);

    err = DTP_pool_create_struct(basic_type_num, basic_types, basic_type_counts, &orig_dtp);
    if (err != DTP_SUCCESS) {
        fprintf(stdout, "Error while creating struct pool\n");
        fflush(stdout);
    }

    err = DTP_pool_create_struct(basic_type_num, basic_types, basic_type_counts, &target_dtp);
    if (err != DTP_SUCCESS) {
        fprintf(stdout, "Error while creating struct pool\n");
        fflush(stdout);
    }

    /* this is ignored */
    count = 0;
#endif

    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;

        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        orig = 0;
        target = size - 1;

        for (i = 0; i < target_dtp->DTP_num_objs; i++) {
            err = DTP_obj_create(target_dtp, i, 0, 0, 0);
            if (err != DTP_SUCCESS) {
                errs++;
                break;
            }

            targetcount = target_dtp->DTP_obj_array[i].DTP_obj_count;
            targettype = target_dtp->DTP_obj_array[i].DTP_obj_type;
            targetbuf = target_dtp->DTP_obj_array[i].DTP_obj_buf;

            MPI_Type_get_extent(targettype, &lb, &extent);

            MPI_Win_create(targetbuf, lb + targetcount * extent,
                           (int) extent, MPI_INFO_NULL, comm, &win);

            for (j = 0; j < orig_dtp->DTP_num_objs; j++) {
                err = DTP_obj_create(orig_dtp, j, 0, 1, count);
                if (err != DTP_SUCCESS) {
                    errs++;
                    break;
                }

                origcount = orig_dtp->DTP_obj_array[j].DTP_obj_count;
                origtype = orig_dtp->DTP_obj_array[j].DTP_obj_type;
                origbuf = orig_dtp->DTP_obj_array[j].DTP_obj_buf;

                if (rank == orig) {
                    MPI_Win_lock(MPI_LOCK_SHARED, target, 0, win);
                    MPI_Accumulate(origbuf, origcount,
                                   origtype, target, 0, targetcount, targettype, MPI_REPLACE, win);
                    MPI_Win_unlock(target, win);
                    MPI_Barrier(comm);

                    char *resbuf = (char *) calloc(lb + extent * targetcount, sizeof(char));

                    /* wait for the destination to finish checking and reinitializing the buffer */
                    MPI_Barrier(comm);

                    MPI_Win_lock(MPI_LOCK_SHARED, target, 0, win);
                    MPI_Get_accumulate(origbuf, origcount,
                                       origtype, resbuf, targetcount, targettype,
                                       target, 0, targetcount, targettype, MPI_REPLACE, win);
                    MPI_Win_unlock(target, win);
                    MPI_Barrier(comm);
                    free(resbuf);
                } else if (rank == target) {
                    /* TODO: add a DTP_buf_set() function to replace this */
                    char *tmp = (char *) calloc(lb + extent * targetcount, sizeof(char));
                    memcpy(tmp, targetbuf, lb + extent * targetcount);

                    MPI_Barrier(comm);
                    MPI_Win_lock(MPI_LOCK_SHARED, target, 0, win);
                    err = DTP_obj_buf_check(target_dtp, i, 0, 1, count);
                    if (err != DTP_SUCCESS) {
                        errs++;
                    }
                    /* restore target buffer */
                    memcpy(targetbuf, tmp, lb + extent * targetcount);
                    free(tmp);

                    MPI_Win_unlock(target, win);

                    /* signal the source that checking and reinitialization is done */
                    MPI_Barrier(comm);

                    MPI_Barrier(comm);
                    MPI_Win_lock(MPI_LOCK_SHARED, target, 0, win);
                    err = DTP_obj_buf_check(target_dtp, i, 0, 1, count);
                    if (err != DTP_SUCCESS) {
                        errs++;
                    }
                    MPI_Win_unlock(target, win);
                }
                DTP_obj_free(orig_dtp, j);
            }
            MPI_Win_free(&win);
            DTP_obj_free(target_dtp, i);
        }
        MTestFreeComm(&comm);
    }
    DTP_pool_free(orig_dtp);
    DTP_pool_free(target_dtp);

#ifdef USE_DTP_POOL_TYPE__STRUCT
    /* cleanup array if any */
    if (basic_types) {
        free(basic_types);
    }
    if (basic_type_counts) {
        free(basic_type_counts);
    }
#endif

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
Code Example #18
File: accfence2_am.c  Project: NexMirror/MPICH
int main(int argc, char *argv[])
{
    int errs = 0;
    int rank, size, source;
    int minsize = 2, count, i;
    MPI_Comm comm;
    MPI_Win win;
    int *winbuf, *sbuf;

    MTest_Init(&argc, &argv);

    /* The following illustrates the use of the routines to
     * run through a selection of communicators and datatypes.
     * Use subsets of these for tests that do not involve combinations
     * of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        source = 0;

        for (count = 32768; count < 65000; count = count * 2) {

            /* We compare with an integer value that can be as large as
             * size * (count * count + (1/2)*(size-1))
             * For large machines (size large), this can exceed the
             * maximum integer for some large values of count.  We check
             * that in advance and break this loop if the above value
             * would exceed MAX_INT.  Specifically,
             *
             * size*count*count + (1/2)*size*(size-1) > MAX_INT
             * count*count > (MAX_INT/size - (1/2)*(size-1))
             */
            if (count * count > (MAX_INT / size - (size - 1) / 2))
                break;

            MPI_Alloc_mem(count * sizeof(int), MPI_INFO_NULL, &winbuf);
            MPI_Alloc_mem(count * sizeof(int), MPI_INFO_NULL, &sbuf);

            for (i = 0; i < count; i++)
                winbuf[i] = 0;
            for (i = 0; i < count; i++)
                sbuf[i] = rank + i * count;
            MPI_Win_create(winbuf, count * sizeof(int), sizeof(int), MPI_INFO_NULL, comm, &win);
            MPI_Win_fence(0, win);
            MPI_Accumulate(sbuf, count, MPI_INT, source, 0, count, MPI_INT, MPI_SUM, win);
            MPI_Win_fence(0, win);
            if (rank == source) {
                /* Check the results */
                for (i = 0; i < count; i++) {
                    int result = i * count * size + (size * (size - 1)) / 2;
                    if (winbuf[i] != result) {
                        if (errs < 10) {
                            fprintf(stderr,
                                    "Winbuf[%d] = %d, expected %d (count = %d, size = %d)\n", i,
                                    winbuf[i], result, count, size);
                        }
                        errs++;
                    }
                }
            }

            MPI_Win_free(&win);

            MPI_Free_mem(winbuf);
            MPI_Free_mem(sbuf);
        }
        MTestFreeComm(&comm);
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
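accfence2_am.c sums contributions from every rank into one window with MPI_Accumulate and MPI_SUM, which is well defined even when operations from different origins target the same location. A small fixed-size sketch of that reduction into rank 0 (N and the rank+i payload are arbitrary):

#include <mpi.h>
#include <stdio.h>

#define N 4

int main(int argc, char *argv[])
{
    int rank, size, i, errs = 0;
    int winbuf[N], sbuf[N];
    MPI_Win win;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    for (i = 0; i < N; i++) {
        winbuf[i] = 0;
        sbuf[i] = rank + i;
    }
    MPI_Win_create(winbuf, N * sizeof(int), sizeof(int),
                   MPI_INFO_NULL, MPI_COMM_WORLD, &win);

    /* Every process adds its contribution into rank 0's window;
       concurrent MPI_SUM accumulates to the same location are allowed */
    MPI_Win_fence(0, win);
    MPI_Accumulate(sbuf, N, MPI_INT, 0, 0, N, MPI_INT, MPI_SUM, win);
    MPI_Win_fence(0, win);

    if (rank == 0) {
        for (i = 0; i < N; i++) {
            /* sum over all ranks r of (r + i) */
            int expected = i * size + size * (size - 1) / 2;
            if (winbuf[i] != expected)
                errs++;
        }
        printf("%d errors\n", errs);
    }
    MPI_Win_free(&win);
    MPI_Finalize();
    return 0;
}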
Code Example #19
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, root;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MTestDatatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	
	count = 1;
	MTEST_DATATYPE_FOR_EACH_COUNT(count) {

        /* To shorten test time, only run the default version of datatype tests
         * for comm world and run the minimum version for other communicators. */
        if (comm != MPI_COMM_WORLD) {
            MTestInitMinDatatypes();
        }

	    while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
		for (root=0; root<size; root++) {
		    if (rank == root) {
			sendtype.InitBuf( &sendtype );
			err = MPI_Bcast( sendtype.buf, sendtype.count,
					 sendtype.datatype, root, comm );
			if (err) {
			    errs++;
			    MTestPrintError( err );
			}
		    }
		    else {
			recvtype.InitBuf( &recvtype );
			err = MPI_Bcast( recvtype.buf, recvtype.count, 
				    recvtype.datatype, root, comm );
			if (err) {
			    errs++;
			    fprintf( stderr, "Error with communicator %s and datatype %s\n", 
				 MTestGetIntracommName(), 
				 MTestGetDatatypeName( &recvtype ) );
			    MTestPrintError( err );
			}
			err = MTestCheckRecv( 0, &recvtype );
			if (err) {
			    errs += err;
			}
		    }
		}
		MTestFreeDatatype( &recvtype );
		MTestFreeDatatype( &sendtype );
	    }
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Code example #20
File: pingping.c Project: dbrowneup/pmap
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count, nmsg, maxmsg;
    MPI_Comm      comm;
    MTestDatatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
        if (comm == MPI_COMM_NULL) continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank( comm, &rank );
        MPI_Comm_size( comm, &size );
        source = 0;
        dest   = size - 1;

        /* To improve reporting of problems about operations, we
           change the error handler to errors return */
        MPI_Comm_set_errhandler( comm, MPI_ERRORS_RETURN );

        for (count = 1; count < MAX_COUNT; count = count * 2) {
            while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
                int nbytes;
                MPI_Type_size( sendtype.datatype, &nbytes );

                /* We may want to limit the total message size sent */
                if (nbytes > MAX_MSG_SIZE) {
                    /* We do not need to free, as we haven't
                       initialized any of the buffers (?) */
                    continue;
                }
                maxmsg = MAX_COUNT - count;
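                /* Send fewer messages as count grows so the total volume of
                 * data sent for each datatype stays bounded. */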
                MTestPrintfMsg( 1, "Sending count = %d of sendtype %s of total size %d bytes\n",
                                count, MTestGetDatatypeName( &sendtype ),
                                nbytes*count );
                /* Make sure that everyone has a recv buffer */
                recvtype.InitBuf( &recvtype );

                if (rank == source) {
                    sendtype.InitBuf( &sendtype );

                    for (nmsg=1; nmsg<maxmsg; nmsg++) {
                        err = MPI_Send( sendtype.buf, sendtype.count,
                                        sendtype.datatype, dest, 0, comm);
                        if (err) {
                            errs++;
                            if (errs < 10) {
                                MTestPrintError( err );
                            }
                        }
                    }
                }
                else if (rank == dest) {
                    for (nmsg=1; nmsg<maxmsg; nmsg++) {
                        err = MPI_Recv( recvtype.buf, recvtype.count,
                                        recvtype.datatype, source, 0,
                                        comm, MPI_STATUS_IGNORE);
                        if (err) {
                            errs++;
                            if (errs < 10) {
                                MTestPrintError( err );
                            }
                        }

                        err = MTestCheckRecv( 0, &recvtype );
                        if (err) {
                            if (errs < 10) {
                                printf( "Data in target buffer did not match for destination datatype %s and source datatype %s, count = %d, message iteration %d of %d\n",
                                        MTestGetDatatypeName( &recvtype ),
                                        MTestGetDatatypeName( &sendtype ),
                                        count, nmsg, maxmsg );
                                recvtype.printErrors = 1;
                                (void)MTestCheckRecv( 0, &recvtype );
                            }
                            errs += err;
                        }
                    }
                }
                MTestFreeDatatype( &recvtype );
                MTestFreeDatatype( &sendtype );
            }
        }
        MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Code example #21
File: putpscw1.c Project: Julio-Anjos/simgrid
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MPI_Win       win;
    MPI_Aint      extent;
    MPI_Group     wingroup, neighbors;
    MTestDatatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	source = 0;
	dest   = size - 1;
	
	for (count = 1; count < 65000; count = count * 2) {
	    while (MTestGetDatatypes( &sendtype, &recvtype, count )) {
		/* Make sure that everyone has a recv buffer */
		recvtype.InitBuf( &recvtype );

		MPI_Type_extent( recvtype.datatype, &extent );
		MPI_Win_create( recvtype.buf, recvtype.count * extent, 
				(int)extent, MPI_INFO_NULL, comm, &win );
		MPI_Win_get_group( win, &wingroup );
		if (rank == source) {
		    /* To improve reporting of problems about operations, we
		       change the error handler to errors return */
		    MPI_Win_set_errhandler( win, MPI_ERRORS_RETURN );
		    sendtype.InitBuf( &sendtype );
		    
		    /* Neighbor is dest only */
		    MPI_Group_incl( wingroup, 1, &dest, &neighbors );
		    err = MPI_Win_start( neighbors, 0, win );
		    if (err) {
			errs++;
			if (errs < 10) {
			    MTestPrintError( err );
			}
		    }
		    MPI_Group_free( &neighbors );
		    err = MPI_Put( sendtype.buf, sendtype.count, 
				    sendtype.datatype, dest, 0, 
				   recvtype.count, recvtype.datatype, win );
		    if (err) {
			errs++;
			MTestPrintError( err );
		    }
		    err = MPI_Win_complete( win );
		    if (err) {
			errs++;
			if (errs < 10) {
			    MTestPrintError( err );
			}
		    }
		}
		else if (rank == dest) {
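		    /* Target side of the post/start/complete/wait sync:
		       expose the window to the source with MPI_Win_post and
		       block in MPI_Win_wait until the source completes its
		       access epoch. */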
		    MPI_Group_incl( wingroup, 1, &source, &neighbors );
		    MPI_Win_post( neighbors, 0, win );
		    MPI_Group_free( &neighbors );
		    MPI_Win_wait( win );
		    /* This should have the same effect, in terms of
		       transfering data, as a send/recv pair */
		    err = MTestCheckRecv( 0, &recvtype );
		    if (err) {
			errs += err;
		    }
		}
		else {
		    /* Nothing; the other processes need not call any 
		       MPI routines */
		    ;
		}
		MPI_Win_free( &win );
		MTestFreeDatatype( &sendtype );
		MTestFreeDatatype( &recvtype );
		MPI_Group_free( &wingroup );
	    }
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Code example #22
File: icm.c Project: NexMirror/MPICH
int main(int argc, char *argv[])
{
    int errs = 0;
    int rank, size, rsize;
    int nsize, nrank;
    int minsize = 2;
    int isLeft;
    MPI_Comm comm, comm1, comm2, comm3, comm4;

    MTest_Init(&argc, &argv);

    /* The following illustrates the use of the routines to
     * run through a selection of communicators and datatypes.
     * Use subsets of these for tests that do not involve combinations
     * of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntercomm(&comm, &isLeft, minsize)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_remote_size(comm, &rsize);
        MPI_Comm_size(comm, &size);

        /* Try building intercomms */
        MPI_Intercomm_merge(comm, isLeft, &comm1);
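        /* MPI_Intercomm_merge orders the processes that pass high == true
         * after those that pass high == false; the rank checks below rely
         * on that ordering. */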
        /* Check the size and ranks */
        MPI_Comm_size(comm1, &nsize);
        MPI_Comm_rank(comm1, &nrank);
        if (nsize != size + rsize) {
            errs++;
            printf("(1) Comm size is %d but should be %d\n", nsize, size + rsize);
        }
        if (isLeft) {
            /* The left processes should be high */
            if (nrank != rsize + rank) {
                errs++;
                printf("(1) rank for high process is %d should be %d\n", nrank, rsize + rank);
            }
        }
        else {
            /* The right processes should be low */
            if (nrank != rank) {
                errs++;
                printf("(1) rank for low process is %d should be %d\n", nrank, rank);
            }
        }

        MPI_Intercomm_merge(comm, !isLeft, &comm2);
        /* Check the size and ranks */
        MPI_Comm_size(comm2, &nsize);
        MPI_Comm_rank(comm2, &nrank);
        if (nsize != size + rsize) {
            errs++;
            printf("(2) Comm size is %d but should be %d\n", nsize, size + rsize);
        }
        if (!isLeft) {
            /* The right processes should be high */
            if (nrank != rsize + rank) {
                errs++;
                printf("(2) rank for high process is %d should be %d\n", nrank, rsize + rank);
            }
        }
        else {
            /* The left processes should be low */
            if (nrank != rank) {
                errs++;
                printf("(2) rank for low process is %d should be %d\n", nrank, rank);
            }
        }


        MPI_Intercomm_merge(comm, 0, &comm3);

        MPI_Intercomm_merge(comm, 1, &comm4);

        MPI_Comm_free(&comm1);
        MPI_Comm_free(&comm2);
        MPI_Comm_free(&comm3);
        MPI_Comm_free(&comm4);

        MTestFreeComm(&comm);
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Code example #23
File: red3.c Project: mpifl/mpich3newforfile
int main( int argc, char *argv[] )
{
    int errs = 0;
    int rank, size, root;
    int minsize = 2, count; 
    MPI_Comm      comm;
    int *buf, *bufout;
    MPI_Op op;
    MPI_Datatype mattype;

    MTest_Init( &argc, &argv );

    MPI_Op_create( uop, 0, &op );
    
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;

	MPI_Comm_size( comm, &size );
	MPI_Comm_rank( comm, &rank );

	matSize = size;  /* used by the user-defined operation */
	/* Only one matrix for now */
	count = 1;

	/* A single matrix, the size of the communicator */
	MPI_Type_contiguous( size*size, MPI_INT, &mattype );
	MPI_Type_commit( &mattype );
	
	buf = (int *)malloc( count * size * size * sizeof(int) );
	if (!buf) MPI_Abort( MPI_COMM_WORLD, 1 );
	bufout = (int *)malloc( count * size * size * sizeof(int) );
	if (!bufout) MPI_Abort( MPI_COMM_WORLD, 1 );

	for (root = 0; root < size; root ++) {
	    initMat( comm, buf );
	    MPI_Reduce( buf, bufout, count, mattype, op, root, comm );
	    if (rank == root) {
		errs += isShiftLeft( comm, bufout );
	    }

	    /* Try the same test, but using MPI_IN_PLACE */
	    initMat( comm, bufout );
	    if (rank == root) {
		MPI_Reduce( MPI_IN_PLACE, bufout, count, mattype, op, root, comm );
	    }
	    else {
		MPI_Reduce( bufout, NULL, count, mattype, op, root, comm );
	    }
	    if (rank == root) {
		errs += isShiftLeft( comm, bufout );
	    }

#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
            /* Try one more time without IN_PLACE to make sure we check
             * aliasing correctly */
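            /* Aliasing sendbuf and recvbuf without MPI_IN_PLACE is erroneous
             * in MPI-2.2, so the call is expected to fail; a return of
             * MPI_SUCCESS is counted as a test failure. */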
            if (rank == root) {
                MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN);
                if (MPI_SUCCESS == MPI_Reduce( bufout, bufout, count, mattype, op, root, comm ))
                    errs++;
            }
#endif
	}

	free( buf );
	free( bufout );
	
	MPI_Type_free( &mattype );

	MTestFreeComm( &comm );
    }

    MPI_Op_free( &op );

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Code example #24
File: bcast3.c Project: abhinavvishnu/matex
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int rank, size, root;
    int minsize = 2, count; 
    MPI_Comm      comm;
    MTestDatatype sendtype, recvtype;

    MTest_Init( &argc, &argv );

    /* The following illustrates the use of the routines to 
       run through a selection of communicators and datatypes.
       Use subsets of these for tests that do not involve combinations 
       of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral( &comm, minsize, 1 )) {
	if (comm == MPI_COMM_NULL) continue;
	/* Determine the sender and receiver */
	MPI_Comm_rank( comm, &rank );
	MPI_Comm_size( comm, &size );
	
	count = 1;
	/* This must be very large to ensure that we reach the long message
	   algorithms */
	for (count = 4; count < 66000; count = count * 4) {
	    while (MTestGetDatatypes( &sendtype, &recvtype, count-1 )) {
		for (root=0; root<size; root++) {
		    if (rank == root) {
			sendtype.InitBuf( &sendtype );
			err = MPI_Bcast( sendtype.buf, sendtype.count,
					 sendtype.datatype, root, comm );
			if (err) {
			    errs++;
			    MTestPrintError( err );
			}
		    }
		    else {
			recvtype.InitBuf( &recvtype );
			err = MPI_Bcast( recvtype.buf, recvtype.count, 
				    recvtype.datatype, root, comm );
			if (err) {
			    errs++;
			    fprintf( stderr, "Error with communicator %s and datatype %s\n", 
				 MTestGetIntracommName(), 
				 MTestGetDatatypeName( &recvtype ) );
			    MTestPrintError( err );
			}
			err = MTestCheckRecv( 0, &recvtype );
			if (err) {
			    errs += err;
			}
		    }
		}
		MTestFreeDatatype( &recvtype );
		MTestFreeDatatype( &sendtype );
	    }
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Code example #25
int main( int argc, char *argv[] )
{
    int errs = 0, err;
    int *sendbuf = 0, *recvbuf = 0;
    int leftGroup, i, j, idx, count, rrank, rsize;
    MPI_Comm comm;
    MPI_Datatype datatype;

    MTest_Init( &argc, &argv );

    datatype = MPI_INT;
    while (MTestGetIntercomm( &comm, &leftGroup, 4 )) {
	if (comm == MPI_COMM_NULL) continue;
	for (count = 1; count < 66000; count = 2 * count) {
	    /* Get an intercommunicator */
	    MPI_Comm_remote_size( comm, &rsize );
	    MPI_Comm_rank( comm, &rrank );
	    sendbuf = (int *)malloc( rsize * count * sizeof(int) );
	    recvbuf = (int *)malloc( rsize * count * sizeof(int) );
	    for (i=0; i<rsize*count; i++) recvbuf[i] = -1;
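	    /* Data flows only from the left group to the right group:
	       the left group supplies send data and passes recvcount 0,
	       while the right group passes sendcount 0 and only receives. */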
	    if (leftGroup) {
		idx = 0;
		for (j=0; j<rsize; j++) {
		    for (i=0; i<count; i++) {
			sendbuf[idx++] = i + rrank;
		    }
		}
		err = MPI_Alltoall( sendbuf, count, datatype, 
				    NULL, 0, datatype, comm );
		if (err) {
		    errs++;
		    MTestPrintError( err );
		}
	    }
	    else {
		int rank, size;

		MPI_Comm_rank( comm, &rank );
		MPI_Comm_size( comm, &size );

		/* In the right group */
		err = MPI_Alltoall( NULL, 0, datatype, 
				    recvbuf, count, datatype, comm );
		if (err) {
		    errs++;
		    MTestPrintError( err );
		}
		/* Check that we have received the correct data */
		idx = 0;
		for (j=0; j<rsize; j++) {
		    for (i=0; i<count; i++) {
			if (recvbuf[idx++] != i + j) {
			    errs++;
			    if (errs < 10) 
				fprintf( stderr, "buf[%d] = %d on %d\n", 
					 idx - 1, recvbuf[idx - 1], rank );
			}
		    }
		}
	    }
	    free( recvbuf );
	    free( sendbuf );
	}
	MTestFreeComm( &comm );
    }

    MTest_Finalize( errs );
    MPI_Finalize();
    return 0;
}
Code example #26
File: idup_comm_gen.c Project: Niharikareddy/mpich
MTEST_THREAD_RETURN_TYPE test_idup(void *arg)
{
    int i;
    int size, rank;
    int ranges[1][3];
    int rleader, isLeft;
    int *excl = NULL;
    int tid = *(int *) arg;

    MPI_Group ingroup, high_group, even_group;
    MPI_Comm local_comm, inter_comm;
    MPI_Comm idupcomms[NUM_IDUPS];
    MPI_Request reqs[NUM_IDUPS];

    MPI_Comm outcomm;
    MPI_Comm incomm = comms[tid];

    MPI_Comm_size(incomm, &size);
    MPI_Comm_rank(incomm, &rank);
    MPI_Comm_group(incomm, &ingroup);

    /* Idup incomm multiple times */
    for (i = 0; i < NUM_IDUPS; i++) {
        MPI_Comm_idup(incomm, &idupcomms[i], &reqs[i]);
    }

    /* Overlap pending idups with various comm generation functions */
    /* Comm_dup */
    MPI_Comm_dup(incomm, &outcomm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_split */
    MPI_Comm_split(incomm, rank % 2, size - rank, &outcomm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create, high half of incomm */
    ranges[0][0] = size / 2;
    ranges[0][1] = size - 1;
    ranges[0][2] = 1;
    MPI_Group_range_incl(ingroup, 1, ranges, &high_group);
    MPI_Comm_create(incomm, high_group, &outcomm);
    MPI_Group_free(&high_group);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Comm_create_group, even ranks of incomm */
    /* exclude the odd ranks */
    excl = malloc((size / 2) * sizeof(int));
    for (i = 0; i < size / 2; i++)
        excl[i] = (2 * i) + 1;

    MPI_Group_excl(ingroup, size / 2, excl, &even_group);
    free(excl);

    if (rank % 2 == 0) {
        MPI_Comm_create_group(incomm, even_group, 0, &outcomm);
    }
    else {
        outcomm = MPI_COMM_NULL;
    }
    MPI_Group_free(&even_group);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    /* Intercomm_create & Intercomm_merge */
    MPI_Comm_split(incomm, (rank < size / 2), rank, &local_comm);
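    /* Split incomm into lower and upper halves; rank 0 and rank size/2 are
     * the local leaders, and each passes the other's rank in incomm as the
     * remote leader when building the intercommunicator. */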
    if (rank == 0) {
        rleader = size / 2;
    }
    else if (rank == size / 2) {
        rleader = 0;
    }
    else {
        rleader = -1;
    }
    isLeft = rank < size / 2;

    MPI_Intercomm_create(local_comm, 0, incomm, rleader, 99, &inter_comm);
    MPI_Intercomm_merge(inter_comm, isLeft, &outcomm);
    MPI_Comm_free(&local_comm);

    errs[tid] += MTestTestComm(inter_comm);
    MTestFreeComm(&inter_comm);
    errs[tid] += MTestTestComm(outcomm);
    MTestFreeComm(&outcomm);

    MPI_Waitall(NUM_IDUPS, reqs, MPI_STATUSES_IGNORE);
    for (i = 0; i < NUM_IDUPS; i++) {
        errs[tid] += MTestTestComm(idupcomms[i]);
        MPI_Comm_free(&idupcomms[i]);
    }
    MPI_Group_free(&ingroup);
    return NULL;
}
Code example #27
File: redscatinter.c Project: abhinavvishnu/matex
int main( int argc, char **argv )
{
    int      err = 0;
    int      *recvcounts;
    int      size, rsize, rank, i;
    int      recvcount, /* Each process receives this much data */
             sendcount, /* Each process contributes this much data */
	     basecount; /* Unit of elements - basecount *rsize is recvcount, 
			   etc. */
    int      isLeftGroup;
    long long *sendbuf, *recvbuf;
    long long sumval;
    MPI_Comm comm;


    MTest_Init( &argc, &argv );
    comm = MPI_COMM_WORLD;

    basecount = 1024;

    while (MTestGetIntercomm( &comm, &isLeftGroup, 2 )) {
	if (comm == MPI_COMM_NULL) continue;

	MPI_Comm_remote_size( comm, &rsize );
	MPI_Comm_size( comm, &size );
	MPI_Comm_rank( comm, &rank );

	if (0) {
	    printf( "[%d] %s (%d,%d) remote %d\n", rank, 
		    isLeftGroup ? "L" : "R", 
		    rank, size, rsize );
	}

	recvcount = basecount * rsize;
	sendcount = basecount * rsize * size;

	recvcounts = (int *)malloc( size * sizeof(int) );
	if (!recvcounts) {
	    fprintf( stderr, "Could not allocate %d int for recvcounts\n", 
		     size );
	    MPI_Abort( MPI_COMM_WORLD, 1 );
	}
	for (i=0; i<size; i++) 
	    recvcounts[i] = recvcount;
	
	sendbuf = (long long *) malloc( sendcount * sizeof(long long) );
	if (!sendbuf) {
	    fprintf( stderr, "Could not allocate %d ints for sendbuf\n", 
		     sendcount );
	    MPI_Abort( MPI_COMM_WORLD, 1 );
	}

	for (i=0; i<sendcount; i++) {
	    sendbuf[i] = (long long)(rank*sendcount + i);
	}
	recvbuf = (long long *)malloc( recvcount * sizeof(long long) );
	if (!recvbuf) {
	    fprintf( stderr, "Could not allocate %d ints for recvbuf\n", 
		     recvcount );
	    MPI_Abort( MPI_COMM_WORLD, 1 );
	}
	for (i=0; i<recvcount; i++) {
	    recvbuf[i] = (long long)(-i);
	}
	
	MPI_Reduce_scatter( sendbuf, recvbuf, recvcounts, MPI_LONG_LONG, MPI_SUM,
			    comm );

	/* Check received data */
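	/* Remote rank r contributed sendbuf[k] = r*sendcount + k; element i of
	   this rank's block corresponds to k = rank*recvcount + i, so summing
	   over the rsize remote ranks gives
	   sendcount*rsize*(rsize-1)/2 + rsize*(i + rank*recvcount). */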
	for (i=0; i<recvcount; i++) {
	    sumval = (long long)(sendcount) * (long long)((rsize * (rsize-1))/2) +
		(long long)(i + rank * rsize * basecount) * (long long)rsize;
	    if (recvbuf[i] != sumval) {
		err++;
		if (err < 4) {
		    fprintf( stdout, "Did not get expected value for reduce scatter\n" );
		    fprintf( stdout, "[%d] %s recvbuf[%d] = %lld, expected %lld\n",
			     rank, 
			     isLeftGroup ? "L" : "R", 
			     i, recvbuf[i], sumval );
		}
	    }
	}
	
	free(sendbuf);
	free(recvbuf);
	free(recvcounts);

	MTestFreeComm( &comm );
    }

    MTest_Finalize( err );

    MPI_Finalize( );

    return 0;
}
Code example #28
File: allred4.c Project: NexMirror/MPICH
int main(int argc, char *argv[])
{
    int errs = 0;
    int size, rank;
    int minsize = 2, count;
    MPI_Comm comm;
    int *buf, *bufout;
    MPI_Op op;
    MPI_Datatype mattype;
    int i;

    MTest_Init(&argc, &argv);

    MPI_Op_create(matmult, 0, &op);

    /* A single rotation matrix (3x3, stored as 9 consequetive elements) */
    MPI_Type_contiguous(9, MPI_INT, &mattype);
    MPI_Type_commit(&mattype);

    /* Sanity check: test that our routines work properly */
    {
        int one = 1;
        buf = (int *) malloc(4 * 9 * sizeof(int));
        initMat(0, 4, 0, &buf[0]);
        initMat(1, 4, 0, &buf[9]);
        initMat(2, 4, 0, &buf[18]);
        initMat(3, 4, 0, &buf[27]);
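        /* Fold the matrices the way the MPI reduction will
         * (inoutvec = invec op inoutvec), leaving the product of all four
         * in buf[27] for checkResult to verify. */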
        matmult(&buf[0], &buf[9], &one, &mattype);
        matmult(&buf[9], &buf[18], &one, &mattype);
        matmult(&buf[18], &buf[27], &one, &mattype);
        checkResult(1, &buf[27], "Sanity Check");
        free(buf);
    }

    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;

        MPI_Comm_size(comm, &size);
        MPI_Comm_rank(comm, &rank);

        for (count = 1; count < size; count++) {

            /* Allocate the matrices */
            buf = (int *) malloc(count * 9 * sizeof(int));
            if (!buf) {
                MPI_Abort(MPI_COMM_WORLD, 1);
            }

            bufout = (int *) malloc(count * 9 * sizeof(int));
            if (!bufout) {
                MPI_Abort(MPI_COMM_WORLD, 1);
            }

            for (i = 0; i < count; i++) {
                initMat(rank, size, i, &buf[i * 9]);
            }

            MPI_Allreduce(buf, bufout, count, mattype, op, comm);
            errs += checkResult(count, bufout, "");

            /* Try the same test, but using MPI_IN_PLACE */
            for (i = 0; i < count; i++) {
                initMat(rank, size, i, &bufout[i * 9]);
            }
            MPI_Allreduce(MPI_IN_PLACE, bufout, count, mattype, op, comm);
            errs += checkResult(count, bufout, "IN_PLACE");

            free(buf);
            free(bufout);
        }
        MTestFreeComm(&comm);
    }

    MPI_Op_free(&op);
    MPI_Type_free(&mattype);

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
Code example #29
File: alltoallw2.c Project: abhinavvishnu/matex
int main( int argc, char **argv )
{

    MPI_Comm comm;
    int      *sbuf, *rbuf;
    int      rank, size;
    int      *sendcounts, *recvcounts, *rdispls, *sdispls;
    int      i, j, *p, err;
    MPI_Datatype *sendtypes, *recvtypes;
    
    MTest_Init( &argc, &argv );
    err = 0;
    
    while (MTestGetIntracommGeneral( &comm, 2, 1 )) {
      if (comm == MPI_COMM_NULL) continue;

      /* Create the buffer */
      MPI_Comm_size( comm, &size );
      MPI_Comm_rank( comm, &rank );
      sbuf = (int *)malloc( size * size * sizeof(int) );
      rbuf = (int *)malloc( size * size * sizeof(int) );
      if (!sbuf || !rbuf) {
	fprintf( stderr, "Could not allocated buffers!\n" );
	MPI_Abort( comm, 1 );
      }
      
      /* Load up the buffers */
      for (i=0; i<size*size; i++) {
	sbuf[i] = i + 100*rank;
	rbuf[i] = -i;
      }
      
      /* Create and load the arguments to alltoallv */
      sendcounts = (int *)malloc( size * sizeof(int) );
      recvcounts = (int *)malloc( size * sizeof(int) );
      rdispls    = (int *)malloc( size * sizeof(int) );
      sdispls    = (int *)malloc( size * sizeof(int) );
      sendtypes    = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
      recvtypes    = (MPI_Datatype *)malloc( size * sizeof(MPI_Datatype) );
      if (!sendcounts || !recvcounts || !rdispls || !sdispls || !sendtypes || !recvtypes) {
	fprintf( stderr, "Could not allocate arg items!\n" );
	MPI_Abort( comm, 1 );
      }
      /* Note that no data is sent to process 0 (sendcounts[0] = 0) and that
	 process 0 receives nothing (recvcounts[i] = 0 on rank 0) */
      for (i=0; i<size; i++) {
	sendcounts[i] = i;
	recvcounts[i] = rank;
	rdispls[i]    = i * rank * sizeof(int);
	sdispls[i]    = (((i+1) * (i))/2) * sizeof(int);
        sendtypes[i] = recvtypes[i] = MPI_INT;
      }
      MPI_Alltoallw( sbuf, sendcounts, sdispls, sendtypes,
		     rbuf, recvcounts, rdispls, recvtypes, comm );
      
      /* Check rbuf */
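      /* Rank i placed the block destined for this rank at sdispls[rank] of its
	 sbuf, so its entries are 100*i + (rank*(rank+1))/2 + j */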
      for (i=0; i<size; i++) {
	p = rbuf + rdispls[i]/sizeof(int);
	for (j=0; j<rank; j++) {
	  if (p[j] != i * 100 + (rank*(rank+1))/2 + j) {
	    fprintf( stderr, "[%d] got %d expected %d for %dth\n",
		     rank, p[j], i * 100 + (rank*(rank+1))/2 + j, j );
	    err++;
	  }
	}
      }

      free(sendtypes);
      free(sdispls);
      free(sendcounts);
      free(sbuf);

#if MTEST_HAVE_MIN_MPI_VERSION(2,2)
      /* check MPI_IN_PLACE, added in MPI-2.2 */
      free( rbuf );
      rbuf = (int *)malloc( size * (2 * size) * sizeof(int) );
      if (!rbuf) {
        fprintf( stderr, "Could not reallocate rbuf!\n" );
        MPI_Abort( comm, 1 );
      }

      /* Load up the buffers */
      for (i = 0; i < size; i++) {
        /* alltoallw displs are in bytes, not in type extents */
        rdispls[i]    = i * (2 * size) * sizeof(int);
        recvtypes[i]  = MPI_INT;
        recvcounts[i] = i + rank;
      }
      memset(rbuf, -1, size * (2 * size) * sizeof(int));
      for (i=0; i < size; i++) {
        p = rbuf + (rdispls[i] / sizeof(int));
        for (j = 0; j < recvcounts[i]; ++j) {
          p[j] = 100 * rank + 10 * i + j;
        }
      }

      MPI_Alltoallw( MPI_IN_PLACE, NULL, NULL, NULL,
                     rbuf, recvcounts, rdispls, recvtypes, comm );

      /* Check rbuf */
      for (i=0; i<size; i++) {
        p = rbuf + (rdispls[i] / sizeof(int));
        for (j=0; j<recvcounts[i]; j++) {
          int expected = 100 * i + 10 * rank + j;
          if (p[j] != expected) {
            fprintf(stderr, "[%d] got %d expected %d for block=%d, element=%dth\n",
                    rank, p[j], expected, i, j);
            ++err;
          }
        }
      }
#endif

      free(recvtypes);
      free(rdispls);
      free(recvcounts);
      free(rbuf);
      MTestFreeComm( &comm );
    }

    MTest_Finalize( err );
    MPI_Finalize();
    return 0;
}
Code example #30
File: pingping.c Project: jeffhammond/mpich
int main(int argc, char *argv[])
{
    int errs = 0, err;
    int rank, size, source, dest;
    int minsize = 2, count[2], nmsg, maxmsg;
    int i, j, len;
    MPI_Aint sendcount, recvcount;
    MPI_Comm comm;
    MPI_Datatype sendtype, recvtype;
    DTP_t send_dtp, recv_dtp;
    char send_name[MPI_MAX_OBJECT_NAME] = { 0 };
    char recv_name[MPI_MAX_OBJECT_NAME] = { 0 };
    void *sendbuf, *recvbuf;

    MTest_Init(&argc, &argv);

#ifndef USE_DTP_POOL_TYPE__STRUCT       /* set in 'test/mpi/structtypetest.txt' to split tests */
    MPI_Datatype basic_type;
    char type_name[MPI_MAX_OBJECT_NAME] = { 0 };

    err = MTestInitBasicPt2ptSignature(argc, argv, count, &basic_type);
    if (err)
        return MTestReturnValue(1);

    err = DTP_pool_create(basic_type, count[0], &send_dtp);
    if (err != DTP_SUCCESS) {
        MPI_Type_get_name(basic_type, type_name, &len);
        fprintf(stdout, "Error while creating send pool (%s,%d)\n", type_name, count[0]);
        fflush(stdout);
    }

    err = DTP_pool_create(basic_type, count[1], &recv_dtp);
    if (err != DTP_SUCCESS) {
        MPI_Type_get_name(basic_type, type_name, &len);
        fprintf(stdout, "Error while creating recv pool (%s,%d)\n", type_name, count[1]);
        fflush(stdout);
    }
#else
    MPI_Datatype *basic_types = NULL;
    int *basic_type_counts = NULL;
    int basic_type_num;

    err = MTestInitStructSignature(argc, argv, &basic_type_num, &basic_type_counts, &basic_types);
    if (err)
        return MTestReturnValue(1);

    err = DTP_pool_create_struct(basic_type_num, basic_types, basic_type_counts, &send_dtp);
    if (err != DTP_SUCCESS) {
        fprintf(stdout, "Error while creating struct pool\n");
        fflush(stdout);
    }

    err = DTP_pool_create_struct(basic_type_num, basic_types, basic_type_counts, &recv_dtp);
    if (err != DTP_SUCCESS) {
        fprintf(stdout, "Error while creating struct pool\n");
        fflush(stdout);
    }

    /* these are ignored */
    count[0] = 0;
    count[1] = 0;
#endif

    /* The following illustrates the use of the routines to
     * run through a selection of communicators and datatypes.
     * Use subsets of these for tests that do not involve combinations
     * of communicators, datatypes, and counts of datatypes */
    while (MTestGetIntracommGeneral(&comm, minsize, 1)) {
        if (comm == MPI_COMM_NULL)
            continue;
        /* Determine the sender and receiver */
        MPI_Comm_rank(comm, &rank);
        MPI_Comm_size(comm, &size);
        source = 0;
        dest = size - 1;

        /* To improve reporting of problems about operations, we
         * change the error handler to errors return */
        MPI_Comm_set_errhandler(comm, MPI_ERRORS_RETURN);

        for (i = 0; i < send_dtp->DTP_num_objs; i++) {
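            /* Pair each layout in the send pool with each layout in the recv
             * pool and repeat the same message exchange for every combination. */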
            err = DTP_obj_create(send_dtp, i, 0, 1, count[0]);
            if (err != DTP_SUCCESS) {
                errs++;
                break;
            }

            sendcount = send_dtp->DTP_obj_array[i].DTP_obj_count;
            sendtype = send_dtp->DTP_obj_array[i].DTP_obj_type;
            sendbuf = send_dtp->DTP_obj_array[i].DTP_obj_buf;

            for (j = 0; j < recv_dtp->DTP_num_objs; j++) {
                int nbytes;
                MPI_Type_size(sendtype, &nbytes);
                maxmsg = MAX_COUNT - count[0];

                err = DTP_obj_create(recv_dtp, j, 0, 0, 0);
                if (err != DTP_SUCCESS) {
                    errs++;
                    break;
                }

                recvcount = recv_dtp->DTP_obj_array[j].DTP_obj_count;
                recvtype = recv_dtp->DTP_obj_array[j].DTP_obj_type;
                recvbuf = recv_dtp->DTP_obj_array[j].DTP_obj_buf;

                /* We may want to limit the total message size sent */
                if (nbytes > MAX_MSG_SIZE) {
                    /* Release the recv object created above before skipping */
                    DTP_obj_free(recv_dtp, j);
                    continue;
                }

                if (rank == source) {
                    MPI_Type_get_name(sendtype, send_name, &len);
                    MTestPrintfMsg(1, "Sending count = %d of sendtype %s of total size %d bytes\n",
                                   count[0], send_name, nbytes * count[0]);

                    for (nmsg = 1; nmsg < maxmsg; nmsg++) {
                        err = MPI_Send(sendbuf, sendcount, sendtype, dest, 0, comm);
                        if (err) {
                            errs++;
                            if (errs < 10) {
                                MTestPrintError(err);
                            }
                        }
                    }
                } else if (rank == dest) {
                    for (nmsg = 1; nmsg < maxmsg; nmsg++) {
                        err =
                            MPI_Recv(recvbuf, recvcount, recvtype, source, 0, comm,
                                     MPI_STATUS_IGNORE);
                        if (err) {
                            errs++;
                            if (errs < 10) {
                                MTestPrintError(err);
                            }
                        }

                        err = DTP_obj_buf_check(recv_dtp, j, 0, 1, count[0]);
                        if (err != DTP_SUCCESS) {
                            if (errs < 10) {
                                MPI_Type_get_name(sendtype, send_name, &len);
                                MPI_Type_get_name(recvtype, recv_name, &len);
                                fprintf(stdout,
                                        "Data in target buffer did not match for destination datatype %s and source datatype %s, count = %d, message iteration %d of %d\n",
                                        recv_name, send_name, count[0], nmsg, maxmsg);
                                fflush(stdout);
                            }
                            errs++;
                        }
                    }
                }
                DTP_obj_free(recv_dtp, j);
            }
            DTP_obj_free(send_dtp, i);
        }
        MTestFreeComm(&comm);
    }

    DTP_pool_free(send_dtp);
    DTP_pool_free(recv_dtp);

#ifdef USE_DTP_POOL_TYPE__STRUCT
    /* cleanup array if any */
    if (basic_types) {
        free(basic_types);
    }
    if (basic_type_counts) {
        free(basic_type_counts);
    }
#endif

    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}