Example 1
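All of these programs come from an MPI test suite and are listed without their
headers. Each assumes roughly the preamble below; the harness header name is an
assumption, as are the per-test size macros (SIZE, SIZE1, SIZE2, DATASIZE,
MAX_PROCESSES, BLOCKSIZE, WAIT_TIMES, MAX_TIME), whose values are not shown
here.

#include <stdio.h>
#include <stdlib.h>
#include <mpi.h>
#include "test.h"   /* assumed harness header declaring Test_Init,
                       Test_Init_No_File, Test_Passed, Test_Failed,
                       Test_Message, Test_Waitforall, Test_Global_Summary,
                       Summarize_Test_Results, Test_Finalize and the
                       DBM debug macro */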
int main(int argc, char **argv)
{
	char *buf;
	char err[200];
	int i, iam;
	MPI_Init(&argc, &argv);
	Test_Init_No_File();

	MPI_Barrier(MPI_COMM_WORLD);
	buf = (char *)malloc(32 * 1024);
	MPI_Comm_rank(MPI_COMM_WORLD, &iam);
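	/* Broadcast buffers of increasing size (64 bytes up to 2 KB) from
	   rank 0; every rank then checks the first byte it received. */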
	for(i = 1; i <= 32; i++) {
		if (iam == 0) {
			*buf = i;
#ifdef VERBOSE
			printf("Broadcasting %d bytes\n", i * 64);
#endif
		}
		MPI_Bcast(buf, i * 64, MPI_BYTE, 0, MPI_COMM_WORLD);
		if (*buf != i) {
			sprintf(err, "Broadcast of %d bytes", i * 64);
			Test_Failed(err);
		}
		MPI_Barrier(MPI_COMM_WORLD);
	}
	Test_Waitforall();
	Test_Global_Summary();
	MPI_Finalize();
	free(buf);

	return 0;
}
Example 2
int main(int argc, char **argv)
{
	int   rank, size, i, j;
	int **table;
	int  *row;
	int   errors = 0;
	int  *displs;
	int  *send_counts;
	int   recv_count;

	MPI_Init(&argc, &argv);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	table = (int**) malloc(size * sizeof(int*));
	table[0] = (int*) malloc(size * size * sizeof(int));
	for(i = 1; i < size; i++)
		table[i] = table[i - 1] + size;
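	/* table[0] holds all size*size ints contiguously and table[i] points at
	   row i, so &table[0][0] works as a single Scatterv send buffer. */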
	row = (int*) malloc(size * sizeof(int));
	displs = (int*) malloc(size * sizeof(int));
	send_counts = (int*) malloc(size * sizeof(int));
	recv_count = size;

	/* If I'm the root (process 0), then fill out the big table and set up
	 * the send_counts and displs arrays */
	if (rank == 0) {
		for (i = 0; i < size; i++) {
			send_counts[i] = recv_count;
			displs[i] = i * size;
			for (j = 0; j < size; j++)
				table[i][j] = i + j;
		}
	}
	/* Scatter the big table to everybody's little table */
	MPI_Scatterv(&table[0][0], send_counts, displs, MPI_INT,
			&row[0], recv_count, MPI_INT, 0, MPI_COMM_WORLD);

	/* Now see if our row looks right */
	for (i = 0; i < size; i++) {
		if (row[i] != i + rank)
			Test_Failed(NULL);
	}
	Test_Waitforall();
	errors = Test_Global_Summary();
	MPI_Finalize();
	free(displs);
	free(send_counts);
	free(row);
	free(table[0]);
	free(table);

	return errors;
}
Example 3
int main(int argc, char **argv)
{
	int datasize;
	int *sbuf, *rbuf;
	int rank, size;
	int sendcount, *recvcounts, *displs;
	int i, j, k, *p;
	char errmsg[200];
	int bufsize;

	MPI_Init(&argc, &argv);
	Test_Init_No_File();

	MPI_Comm_size(MPI_COMM_WORLD, &size);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	/* is the buffersize specified? */
	if (argc == 2) {
		datasize = atoi(argv[1]);
		if (datasize <= 0) {
			fprintf(stderr, "Invalid data size!\n");
			MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
		}
	} else {
		datasize = DATASIZE;
	}

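	/* Rank r contributes (r+1)*datasize ints, so the payload totals
	   size*(size+1)/2*datasize ints; one extra int is left between blocks
	   to check that Allgatherv honours the displacements. */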
	/* Create the buffers */
	sbuf = (int *) malloc((rank + 1) * datasize * sizeof(int));

	rbuf = (int *) malloc( (size * (size+1) / 2 * datasize  + (size-1)) * sizeof(int) );
	recvcounts = (int *) malloc(size * sizeof(int));
	displs     = (int *) malloc(size * sizeof(int));
	if (!sbuf || !rbuf || !recvcounts || !displs) {
		fprintf(stderr, "Could not allocate buffers!\n");
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
	}

	/* Load up the buffers */
	for (i = 0; i < rank + 1; i++) {
		for (j = 0; j < datasize; j++)
			sbuf[i * datasize + j] = i + 100 * rank;
	}
	for (i = 0; i < size * (size + 1) / 2 * datasize + (size-1); i++) {
		rbuf[i] = -(i + 1);
	}

	/* Create the arguments to MPI_Allgatherv() */
	sendcount = (rank + 1) * datasize;
	j = 0;
	for (i = 0; i < size; i++) {
		recvcounts[i] = (i + 1) * datasize;
		displs[i] = j;
		j += (i + 1) * datasize + 1;
	}

	MPI_Allgatherv(sbuf, sendcount, MPI_INT,
				   rbuf, recvcounts, displs, MPI_INT, MPI_COMM_WORLD);

	/* Check rbuf */
	p = rbuf;
	for (i = 0; i < size; i++) {
		for (j = 0; j < i + 1; j++) {
			for (k = 0; k < datasize; k++) {
				if (p[j * datasize + k] != j + 100 * i) {
					sprintf(errmsg, "[%d] got %d expected %d for %dth\n", rank, p[j * datasize + k], j + 100 * i, j * datasize + k);
					Test_Message( errmsg );
					Test_Failed( NULL );
				}
			}
		}
		p += (i + 1) * datasize + 1;
	}

	free(rbuf);
	free(sbuf);
	free(recvcounts);
	free(displs);

	Test_Waitforall();
	Test_Global_Summary();

	MPI_Finalize();
	exit( EXIT_SUCCESS );
}
Example 4
int main(int argc, char *argv[])
{
	int rank, nprocs, i;
	int *A, *B;

	MPI_Win win;

	MPI_Init(&argc,&argv);
	Test_Init_No_File();
	MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);

	if (nprocs != 2) {
		printf("Run this program with 2 processes\n");
		MPI_Abort(MPI_COMM_WORLD,1);
	}

	i = MPI_Alloc_mem(SIZE * sizeof(int), MPI_INFO_NULL, &A);
	if (i) {
		printf("Can't allocate memory in test program\n");
		MPI_Abort(MPI_COMM_WORLD, 1);
	}
	i = MPI_Alloc_mem(SIZE * sizeof(int), MPI_INFO_NULL, &B);
	if (i) {
		printf("Can't allocate memory in test program\n");
		MPI_Abort(MPI_COMM_WORLD, 1);
	}

	if (rank == 0) {
		for (i=0; i<SIZE; i++)
			A[i] = B[i] = i;
	}
	else {
		for (i=0; i<SIZE; i++) {
			A[i] = (-3)*i;
			B[i] = (-4)*i;
		}
	}
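	/* Both ranks expose B as the RMA window: rank 0 targets rank 1's B with
	   MPI_Put, and rank 1 targets rank 0's B with MPI_Get/MPI_Accumulate. */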

	MPI_Win_create(B, SIZE*sizeof(int), sizeof(int), MPI_INFO_NULL,
			MPI_COMM_WORLD, &win);

	MPI_Win_fence(0, win);

	if (rank == 0) {
		for (i=0; i<SIZE-1; i++)
			MPI_Put(A+i, 1, MPI_INT, 1, i, 1, MPI_INT, win);
	}
	else {
		for (i=0; i<SIZE-1; i++)
			MPI_Get(A+i, 1, MPI_INT, 0, i, 1, MPI_INT, win);

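		/* The loop above stops at SIZE-2, so i == SIZE-1 here: add
		   A[SIZE-1] into B[SIZE-1] on rank 0. */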
		MPI_Accumulate(A+i, 1, MPI_INT, 0, i, 1, MPI_INT, MPI_SUM, win);
	}
	MPI_Win_fence(0, win);

	if (rank == 1) {
		for (i=0; i<SIZE-1; i++) {
			if (A[i] != B[i]) {
				printf("Put/Get Error: A[%d]=%d, B[%d]=%d\n", i, A[i], i, B[i]);
				Test_Failed(NULL);
			}
		}
	}
	else {
		if (B[SIZE-1] != SIZE - 1 - 3*(SIZE-1)) {
			printf("Accumulate Error: B[SIZE-1] is %d, should be %d\n", B[SIZE-1], SIZE - 1 - 3*(SIZE-1));
			Test_Failed(NULL);
		}
	}

	MPI_Win_free(&win);

	MPI_Free_mem(A);
	MPI_Free_mem(B);

	Test_Waitforall();
	Test_Global_Summary();

	MPI_Finalize();
	return 0;
}
Example 5
int
main( int argc, char **argv)
{
    int rank, size, i, recv_flag, ret, passed;
    MPI_Status Status;
    char message[17];
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

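    /* Rank 0 collects an "Entering Barrier" note from every other rank,
       verifies that no "Past Barrier" note can arrive before it enters the
       barrier itself, and then drains those notes afterwards. */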
    if (rank == 0) {
	Test_Init("barrier", rank);
	/* Receive the startup messages from each of the 
	   other clients */
	for (i = 0; i < size - 1; i++) {
	    MPI_Recv(message, 17, MPI_CHAR, MPI_ANY_SOURCE, 2000, 
		     MPI_COMM_WORLD, &Status);
	}

	/* Now use Iprobe to make sure no more messages arrive for a
	   while */
	passed = 1;
	for (i = 0; i < WAIT_TIMES; i++){
	    recv_flag = 0;
	    MPI_Iprobe(MPI_ANY_SOURCE, 2000, MPI_COMM_WORLD, 
		       &recv_flag, &Status);
	    if (recv_flag)
		passed = 0;
	}

	if (passed)
	    Test_Passed("Barrier Test 1");
	else
	    Test_Failed("Barrier Test 1");

	/* Now go into the barrier myself */
	MPI_Barrier(MPI_COMM_WORLD);

	/* And collect the message each rank sends as it leaves the barrier */
	for (i = 0; i < size - 1; i++) {
	    MPI_Recv(message, 13, MPI_CHAR, MPI_ANY_SOURCE, 2000, 
		     MPI_COMM_WORLD, &Status);
	}

	/* Now use Iprobe to make sure no more messages arrive for a
	   while */
	passed = 1;
	for (i = 0; i < WAIT_TIMES; i++){
	    recv_flag = 0;
	    MPI_Iprobe(MPI_ANY_SOURCE, 2000, MPI_COMM_WORLD, 
		       &recv_flag, &Status);
	    if (recv_flag)
		passed = 0;
	}
	if (passed)
	    Test_Passed("Barrier Test 2");
	else
	    Test_Failed("Barrier Test 2");

	Test_Waitforall( );
	ret = Summarize_Test_Results();
	Test_Finalize();
	MPI_Finalize();
	return ret;
    } else {
	MPI_Send((char*)"Entering Barrier", 17, MPI_CHAR, 0, 2000, MPI_COMM_WORLD);
	MPI_Barrier(MPI_COMM_WORLD);
	MPI_Send((char*)"Past Barrier", 13, MPI_CHAR, 0, 2000, MPI_COMM_WORLD);
	Test_Waitforall( );
	MPI_Finalize();
	return 0;
    }
}
Example 6
int main(int argc, char **argv)
{
	int rank, size, i, j;
	int table[MAX_PROCESSES][MAX_PROCESSES];
	int errors = 0;
	int block_size, begin_row, end_row, send_count, recv_count;

	DBM("calling MPI_Init\n");
	MPI_Init(&argc, &argv);
	DBM("calling MPI_Comm_rank\n");
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	DBM("calling MPI_Comm_size\n");
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	for (i = 0; i < MAX_PROCESSES; i++)
		for (j = 0; j < MAX_PROCESSES; j++)
			table[i][j]=-1;

	/* Determine what rows are my responsibility */
	block_size = MAX_PROCESSES / size;
	begin_row  = rank * block_size;
	end_row    = (rank + 1) * block_size;
	send_count = block_size * MAX_PROCESSES;
	recv_count = send_count;
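	/* Assumes size divides MAX_PROCESSES evenly; any leftover rows keep
	   their -1 fill and pass the (weak) consistency check below. */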

	/* A maximum of MAX_PROCESSES processes can participate */
	if (size > MAX_PROCESSES) {
		fprintf(stderr, "This test supports at most %d processes\n",
				MAX_PROCESSES);
		DBM("calling MPI_Abort\n");
		MPI_Abort(MPI_COMM_WORLD, 1);
	}

	/* Paint my rows my color */
	for (i = begin_row; i < end_row ;i++)
		for (j = 0; j < MAX_PROCESSES; j++)
			table[i][j] = rank + 10;

#ifdef PRINTTABLE
	/* print table */
	printf("\n");
	for (i = 0; i < MAX_PROCESSES; i++)
	{
		for (j = 0; j < MAX_PROCESSES; j++)
			printf("%4d", table[i][j]);
		printf("\n");
	}
#endif
	DBM("starting allgather\n");
	/* Everybody gets the gathered table */
	/* The send buffer would alias the receive buffer here, which MPI
	   forbids; MPI_IN_PLACE gathers the rows that are already in place. */
	MPI_Allgather(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
			&table[0][0], recv_count, MPI_INT, MPI_COMM_WORLD);

	/* Everybody should have the same table now. Note that this check is
	 * weak and does not guarantee correctness; print the table or devise a
	 * stronger test to be sure. */
#ifdef PRINTTABLE
	/* print table */
	printf("\n");
	for (i = 0; i < MAX_PROCESSES; i++)
	{
		for (j = 0; j < MAX_PROCESSES; j++)
			printf("%4d", table[i][j]);
		printf("\n");
	}
#endif

	for (i = 0; i < MAX_PROCESSES; i++) {
		if (table[i][0] - table[i][MAX_PROCESSES - 1] != 0) {
			Test_Failed(NULL);
			printf("error at i=%d\n", i);
		}
	}

	Test_Waitforall();
	errors = Test_Global_Summary();
	MPI_Finalize();

	return errors;
}
Example 7
int main(int argc, char *argv[])
{
	int rank, nprocs, A[SIZE2], B[SIZE2], i;
	MPI_Win win;

	MPI_Init(&argc,&argv);
	Test_Init_No_File();
	MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);

	if (nprocs != 2) {
		printf("Run this program with 2 processes\n");
		MPI_Abort(MPI_COMM_WORLD,1);
	}

	if (rank == 0) {
		for (i = 0; i < SIZE2; i++)
			A[i] = B[i] = i;
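		/* Rank 0 exposes no memory (NULL base, size 0); it acts purely as
		   the origin of the lock/put/get epochs below. */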
		MPI_Win_create(NULL, 0, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &win);

		for (i = 0; i < SIZE1; i++) {
			MPI_Win_lock(MPI_LOCK_SHARED, 1, 0, win);
			MPI_Put(A+i, 1, MPI_INT, 1, i, 1, MPI_INT, win);
			MPI_Win_unlock(1, win);
		}

		for (i = 0; i < SIZE1; i++) {
			MPI_Win_lock(MPI_LOCK_SHARED, 1, 0, win);
			MPI_Get(B+i, 1, MPI_INT, 1, SIZE1+i, 1, MPI_INT, win);
			MPI_Win_unlock(1, win);
		}

		MPI_Win_free(&win);

		for (i = 0; i < SIZE1; i++)
			if (B[i] != (-4) * (i + SIZE1)) {
				printf("Get Error: B[%d] is %d, should be %d\n", i, B[i], (-4) * (i + SIZE1));
				Test_Failed(NULL);
			}
	}

	else {  /* rank=1 */
		for (i = 0; i < SIZE2; i++)
			B[i] = (-4) * i;
		MPI_Win_create(B, SIZE2 * sizeof(int), sizeof(int), MPI_INFO_NULL,
				MPI_COMM_WORLD, &win);

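		/* MPI_Win_free is collective: rank 1 blocks here until rank 0 has
		   finished and unlocked all of its access epochs. */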
		MPI_Win_free(&win);

		for (i = 0; i < SIZE1; i++) {
			if (B[i] != i) {
				printf("Put Error: B[%d] is %d, should be %d\n", i, B[i], i);
				Test_Failed(NULL);
			}
		}
	}

	Test_Waitforall();
	Test_Global_Summary();
	MPI_Finalize();
	return 0;
}
Example 8
int main(int argc, char **argv)
{
    int rank, size, i, j;
    int table[MAX_PROCESSES][MAX_PROCESSES];
    int errors = 0;
    int displs[MAX_PROCESSES];
    int recv_counts[MAX_PROCESSES];
    int block_size, begin_row, end_row, send_count;

    for(i = 0; i < MAX_PROCESSES; i++)
        for(j = 0; j < MAX_PROCESSES; j++)
            table[i][j] = 0;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    /* Determine what rows are my responsibility */
    block_size = MAX_PROCESSES / size;
    begin_row  = rank * block_size;
    end_row    = (rank + 1) * block_size;
    send_count = block_size * MAX_PROCESSES;

    /* A maximum of MAX_PROCESSES processes can participate */
    if (size > MAX_PROCESSES) {
        fprintf(stderr, "This test supports at most %d processes\n",
                MAX_PROCESSES);
        fflush(stderr);
        DBM("calling MPI_Abort\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    /* Fill in the displacements and recv_counts */
    for (i = 0; i < size; i++) {
        displs[i]      = i * block_size * MAX_PROCESSES;
        recv_counts[i] = send_count;
    }

    /* Paint my rows my color */
    for (i = begin_row; i < end_row; i++)
        for (j = 0; j < MAX_PROCESSES; j++)
            table[i][j] = rank + 10;

    /* Everybody gets the gathered data */
    /* The send buffer would alias the receive buffer at this rank's
       displacement, which MPI forbids; MPI_IN_PLACE gathers the rows that
       are already in place instead. */
    MPI_Allgatherv(MPI_IN_PLACE, 0, MPI_DATATYPE_NULL,
                   &table[0][0], recv_counts, displs, MPI_INT,
                   MPI_COMM_WORLD);

    /* Everybody should have the same table now.
     *
     * The entries are:
     *  table[i][j] = i / block_size + 10;
     */
    for (i = 0; i < size; i++)
        if (table[i][0] - table[i][MAX_PROCESSES - 1] != 0)
            Test_Failed(NULL);

    for (i = 0; i < size; i++)
        for (j = 0; j < MAX_PROCESSES; j++)
            if (table[i][j] != i / block_size + 10)
                Test_Failed(NULL);

    errors = Test_Global_Summary();
    if (errors) {
        /* Print out table if there are any errors */
        for (i = 0; i < size; i++) {
            printf("\n");
            for (j = 0; j < MAX_PROCESSES; j++)
                printf("  %d", table[i][j]);
        }
        printf("\n");
    }

    Test_Waitforall();
    MPI_Finalize();
    return errors;
}
Example 9
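This synchronous-send test also relies on file-local constants and the helper
Generate_Data from the harness. A plausible sketch of those constants, with
every value assumed rather than taken from the source:

#define SIZE     10000        /* assumed: ints in the test buffer */
#define MAX_TIME 20           /* assumed: seconds to watch for an early arrival */
static int src  = 1;          /* assumed: sending rank */
static int dest = 0;          /* assumed: receiving rank (runs the checks) */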
int main( int argc, char **argv)
{
    int rank; /* My Rank (0 or 1) */
    int act_size = 0;
    int flag, np, rval, i;
    int buffer[SIZE];
    double t0;
    char *Current_Test = NULL;
    MPI_Status status, status1, status2;
    int count1, count2;
    int sizes[4];

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size( MPI_COMM_WORLD, &np );
    /*if (np != 2) {
        fprintf(stderr, "*** This program uses exactly 2 processes! ***\n");
        MPI_Abort( MPI_COMM_WORLD, 1 );
        }*/

    sizes[0] = 0;
    sizes[1] = 1;
    sizes[2] = 1000;
    sizes[3] = SIZE;
/*    for (i = 0; i < 4; i++ ) { */
    for (i = 1; i < 2; i++ ) {
	act_size = sizes[i];
        if (rank == src) { 
            Generate_Data(buffer, SIZE);
            MPI_Recv( buffer, 0, MPI_INT, dest, 0, MPI_COMM_WORLD, &status );
            MPI_Send( buffer, 0, MPI_INT, dest, 0, MPI_COMM_WORLD );
            MPI_Ssend( buffer, act_size, MPI_INT, dest, 1, MPI_COMM_WORLD );
            MPI_Ssend( buffer, act_size, MPI_INT, dest, 2, MPI_COMM_WORLD );
            
        } else if (rank == dest) {
            Test_Init("ssendtest", rank);
            /* Test 1 */
            Current_Test = "Ssend Test (Synchronous Send -> Normal Receive)";
            MPI_Send( buffer, 0, MPI_INT, src, 0, MPI_COMM_WORLD );
            MPI_Recv( buffer, 0, MPI_INT, src, 0, MPI_COMM_WORLD, &status );
            t0 = MPI_Wtime();
            flag = 0;
	    /* This test depends on a working wtime.  Make a simple check */
	    if (t0 == 0 && MPI_Wtime() == 0) {
		fprintf( stderr,
			"MPI_WTIME is returning 0; a working value is needed for this test.\n" );
		Test_Failed(Current_Test);
		fprintf( stderr, "[%i] Aborting\n", rank );
		fflush( stderr );
		MPI_Abort( MPI_COMM_WORLD, 1 );
	    }
            while (MPI_Wtime() - t0 < MAX_TIME) {
                MPI_Iprobe( src, 2, MPI_COMM_WORLD, &flag, &status );
                if (flag) {
                    Test_Failed(Current_Test);
                    break;
                    }
                }
            if (!flag) 
                Test_Passed(Current_Test);
            MPI_Recv( buffer, act_size, MPI_INT, src, 1, MPI_COMM_WORLD, 
                     &status1 );
            MPI_Recv( buffer, act_size, MPI_INT, src, 2, MPI_COMM_WORLD, 
                     &status2 );
            
            MPI_Get_count( &status1, MPI_INT, &count1 );
            MPI_Get_count( &status2, MPI_INT, &count2 );
            if (count1 != act_size) {
                fprintf( stdout, 
                        "(1) Wrong count from recv of ssend: got %d (%d)\n", 
                        count1, act_size );
                }
            if (status1.MPI_TAG != 1) {
                fprintf( stdout, "(1) Wrong tag from recv of ssend: got %d\n", 
                        status1.MPI_TAG );
                }
            if (count2 != act_size) {
                fprintf( stdout,
                        "(2) Wrong count from recv of ssend: got %d (%d)\n",
                        count2, act_size );
                }
            if (status2.MPI_TAG != 2) {
                fprintf( stdout, "(2) Wrong tag from recv of ssend: got %d\n", 
                        status2.MPI_TAG );
                }

        }
    }

    /* The end of this listing is missing; closing sequence assumed, following
       the pattern of the other tests in this suite. */
    Test_Waitforall( );
    if (rank == dest) {
        rval = Summarize_Test_Results();
        Test_Finalize();
    }
    else
        rval = 0;
    MPI_Finalize();
    return rval;
}
Example 10
int main(int argc, char **argv)
{
	int datasize;
	int *sbuf, *rbuf;
	int rank, size;
	int i, j, *p;
	char errmsg[200];

	MPI_Init(&argc, &argv);
	Test_Init_No_File();

	MPI_Comm_size(MPI_COMM_WORLD, &size);
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);

	/* is the buffersize specified? */
	if (argc == 2) {
		datasize = atoi(argv[1]);
		if (datasize <= 0) {
			fprintf(stderr, "Invalid data size!\n");
			MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
		}
	} else {
		datasize = DATASIZE;
	}

	/* Create the buffers */
	sbuf = (int *) malloc(datasize * sizeof(int));
	rbuf = (int *) malloc(size * datasize * sizeof(int));
	if (!sbuf || !rbuf) {
		fprintf(stderr, "Could not allocate buffers!\n");
		MPI_Abort(MPI_COMM_WORLD, EXIT_FAILURE);
	}

	/* Load up the buffers */
	for (i = 0; i < datasize; i++) {
		sbuf[i] = i + 100 * rank;
	}
	for (i = 0; i < size * datasize; i++) {
		rbuf[i] = -(i + 1);
	}

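	/* After the allgather, rank i's block occupies
	   rbuf[i*datasize .. (i+1)*datasize - 1]. */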
	MPI_Allgather(sbuf, datasize, MPI_INT, rbuf, datasize, MPI_INT, MPI_COMM_WORLD);

	/* Check rbuf */
	for (i = 0; i < size; i++) {
		p = rbuf + i * datasize;
		for (j = 0; j < datasize; j++) {
			if (p[j] != j + 100 * i) {
				sprintf(errmsg, "[%d] got %d expected %d for %dth\n", rank, p[j], j + 100 * i, i * datasize + j);
				Test_Message( errmsg );
				Test_Failed( NULL );
			}
		}
	}

	free(rbuf);
	free(sbuf);

	Test_Waitforall();
	Test_Global_Summary();

	MPI_Finalize();
	exit( EXIT_SUCCESS );
}
Example 11
int main(int argc, char *argv[])
{
	int rank, destrank, nprocs, A[SIZE2], B[SIZE2], i;
	MPI_Group comm_group, group;
	MPI_Win win;

	MPI_Init(&argc,&argv);
	Test_Init_No_File();
	MPI_Comm_size(MPI_COMM_WORLD,&nprocs);
	MPI_Comm_rank(MPI_COMM_WORLD,&rank);

	if (nprocs != 2) {
		printf("Run this program with 2 processes\n");
		MPI_Abort(MPI_COMM_WORLD,1);
	}

	MPI_Comm_group(MPI_COMM_WORLD, &comm_group);

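	/* Generalized active-target sync: rank 0 opens an access epoch on group
	   {1} with MPI_Win_start/complete, while rank 1 opens the matching
	   exposure epoch on group {0} with MPI_Win_post/wait. */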
	if (rank == 0) {
		for (i=0; i<SIZE2; i++) A[i] = B[i] = i;
		MPI_Win_create(NULL, 0, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &win);
		destrank = 1;
		MPI_Group_incl(comm_group, 1, &destrank, &group);
		MPI_Win_start(group, 0, win);
		for (i=0; i<SIZE1; i++)
			MPI_Put(A+i, 1, MPI_INT, 1, i, 1, MPI_INT, win);
		for (i=0; i<SIZE1; i++)
			MPI_Get(B+i, 1, MPI_INT, 1, SIZE1+i, 1, MPI_INT, win);

		MPI_Win_complete(win);

		for (i=0; i<SIZE1; i++)
			if (B[i] != (-4)*(i+SIZE1)) {
				printf("Get Error: B[%d] is %d, should be %d\n", i, B[i], (-4)*(i+SIZE1));
				Test_Failed(NULL);
			}
	}

	else {  /* rank=1 */
		for (i=0; i<SIZE2; i++) B[i] = (-4)*i;
		MPI_Win_create(B, SIZE2*sizeof(int), sizeof(int), MPI_INFO_NULL,
				MPI_COMM_WORLD, &win);
		destrank = 0;
		MPI_Group_incl(comm_group, 1, &destrank, &group);
		MPI_Win_post(group, 0, win);
		MPI_Win_wait(win);

		for (i=0; i<SIZE1; i++) {
			if (B[i] != i) {
				printf("Put Error: B[%d] is %d, should be %d\n", i, B[i], i);
				Test_Failed(NULL);
			}
		}
	}

	MPI_Group_free(&group);
	MPI_Group_free(&comm_group);
	MPI_Win_free(&win);

	Test_Waitforall();
	Test_Global_Summary();

	MPI_Finalize();
	return 0;
}
Example 12
int main( int argc, char **argv )
{
    int rank, size, ret, passed, i, *test_array;
    int stride, count, root;
    MPI_Datatype newtype;
    MPI_Comm     comm = MPI_COMM_WORLD;

    /* Set up MPI */
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(comm, &rank);

    /* Setup the tests */
    Test_Init("bcastvec", rank);

    /* Allow for additional communicators */
    MPI_Comm_size(comm, &size);
    stride = (rank + 1);
    test_array = (int *)malloc(size*stride*sizeof(int));

    /* Create the vector datatype EXCEPT for process 0 (vector of
       stride 1 is contiguous) */
    if (rank > 0) {
        count = 1;
        MPI_Type_vector( size, 1, stride, MPI_INT, &newtype);
        MPI_Type_commit( &newtype );
    }
    else {
        count = size;
        newtype = MPI_INT;
    }
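    /* All ranks see the same type signature (size MPI_INTs); only the local
       layout differs, with rank r storing element i at offset i*(r+1). */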

    /* Perform the test.  Each process in turn becomes the root.
       After each operation, check that nothing has gone wrong */
    passed = 1;
    for (root = 0; root < size; root++) {
        /* Fill the array with -1 for unset, rank + i * size for set */
        for (i=0; i<size*stride; i++) test_array[i] = -1;
        if (rank == root)
            for (i=0; i<size; i++) test_array[i*stride] = rank + i * size;
        MPI_Bcast( test_array, count, newtype, root, comm );
        for (i=0; i<size; i++) {
            if (test_array[i*stride] != root + i * size) {
                passed = 0;
            }
        }
    }
    free(test_array);
    if (rank != 0) MPI_Type_free( &newtype );

    if (!passed)
        Test_Failed("Simple Broadcast test with datatypes");
    else {
        if (rank == 0)
            Test_Passed("Simple Broadcast test with datatypes");
    }

    /* Close down the tests */
    if (rank == 0)
        ret = Summarize_Test_Results();
    else {
        ret = 0;
    }
    Test_Finalize();

    /* Close down MPI */
    Test_Waitforall( );
    MPI_Finalize();
    return ret;
}
Example 13
int main(int argc, char **argv)
{
	int rank, size;
	int i, j, k;
	int **table, *buffer;
	int begin_row, end_row, send_count, *recv_counts, *displs;
	char errmsg[200];

	MPI_Init( &argc, &argv );
	Test_Init_No_File();
	
	MPI_Comm_rank( MPI_COMM_WORLD, &rank );
	MPI_Comm_size( MPI_COMM_WORLD, &size );

	/* get buffer space and init table */
	buffer      = (int *) malloc( (size * BLOCKSIZE) * (size * BLOCKSIZE) * sizeof(int) );
	table       = (int **)malloc( (size * BLOCKSIZE) * sizeof(int *) );
	recv_counts = (int *) malloc( size * sizeof(int) );
	displs      = (int *) malloc( size * sizeof(int) );
	if( !buffer || !table || !recv_counts || !displs ) {
		fprintf( stderr, "Out of memory error!\n" );
		MPI_Abort( MPI_COMM_WORLD, EXIT_FAILURE );
	}
	for( i = 0; i < size * BLOCKSIZE; i++ )
		table[i] = &(buffer[i*size*BLOCKSIZE]);

	/* Determine what rows are my responsibility */
	begin_row = rank * BLOCKSIZE;
	end_row   = (rank + 1) * BLOCKSIZE;
	send_count = BLOCKSIZE * size * BLOCKSIZE;
	for( i = 0; i < size; i++ ) {
		recv_counts[i] = BLOCKSIZE * size * BLOCKSIZE;
		displs[i]      = i * BLOCKSIZE * size * BLOCKSIZE;
	}
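	/* Every rank contributes the same amount; the displacements put rank
	   i's rows at row i*BLOCKSIZE of the gathered table. */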

	/* Paint my rows my color */
	for( i = begin_row; i < end_row ; i++ )
		for( j = 0; j < size * BLOCKSIZE; j++ )
			table[i][j] = rank + 10;

	/* Gather everybody's result together - sort of like an */
	/* inefficient allgather */
	/* On each round's root the send buffer would alias the receive buffer,
	   which MPI forbids; the root passes MPI_IN_PLACE instead. */
	for (i = 0; i < size; i++)
		MPI_Gatherv(rank == i ? MPI_IN_PLACE : (void *)table[begin_row],
					send_count, MPI_INT,
					table[0], recv_counts, displs, MPI_INT, i,
					MPI_COMM_WORLD);

	/* Everybody should have the same table now. */
	for( i = 0; i < size; i++ )
		for( j = 0; j < BLOCKSIZE; j++ )
			for( k = 0; k < size * BLOCKSIZE; k++ )
				if( table[i*BLOCKSIZE+j][k] != i + 10 ) {
					sprintf(errmsg, "[%d] got %d expected %d for %dth entry in row %d\n",
							rank, table[i*BLOCKSIZE+j][k], i + 10, k, i*BLOCKSIZE + j);
					Test_Message( errmsg );
					Test_Failed( NULL );
				}
	
	Test_Waitforall();
	Test_Global_Summary();

	free( buffer );
	free( table );
	free( recv_counts );
	free( displs );

	MPI_Finalize();
	exit( EXIT_SUCCESS );
}