/*
 * Class:     mpi_Win
 * Method:    getGroup
 * Signature: (J)J
 *
 * JNI bridge: obtains the group of processes attached to the given
 * window handle.  Any MPI error code is converted into a Java
 * exception by ompi_java_exceptionCheck; the group handle is returned
 * to Java as a jlong.
 */
JNIEXPORT jlong JNICALL Java_mpi_Win_getGroup(
        JNIEnv *env, jobject jthis, jlong win)
{
    MPI_Group grp;
    int status = MPI_Win_get_group((MPI_Win)win, &grp);
    /* Raises a Java exception on failure; returns normally on success. */
    ompi_java_exceptionCheck(env, status);
    return (jlong)grp;
}
int main(int argc, char ** argv) { MPI_Aint win_size = WIN_SIZE; MPI_Win win; MPI_Group group; char* base; int disp_unit = 1; int rank, size, target_rank, target_disp = 1; int r, flag; /*************************************************************/ /* Init and set values */ /*************************************************************/ MPI_Init(&argc, &argv); MPI_Comm_rank(MPI_COMM_WORLD, &rank); MPI_Comm_size(MPI_COMM_WORLD, &size); target_rank = (rank + 1) % size; MPI_Alloc_mem(WIN_SIZE, MPI_INFO_NULL, &base); if ( NULL == base ) { printf("failed to alloc %d\n", WIN_SIZE); exit(16); } /*************************************************************/ /* Win_create */ /*************************************************************/ /* MPI_Win_create(void *base, MPI_Aint size, int disp_unit, MPI_Info info, MPI_Comm comm, MPI_Win *win); */ r = MPI_Win_create(base, win_size, 1, MPI_INFO_NULL, MPI_COMM_WORLD, &win); if ( MPI_SUCCESS TEST_OP r ) printf("Rank %d failed MPI_Win_create\n", rank); /*************************************************************/ /* First epoch: Tests Put, Get, Get_group, Post, Start, */ /* Complete, Wait, Lock, Unlock */ /*************************************************************/ r = MPI_Win_get_group(win, &group); if ( MPI_SUCCESS TEST_OP r ) printf("Rank %d failed MPI_Win_get_group\n", rank); r = MPI_Win_post(group, 0, win); if ( MPI_SUCCESS TEST_OP r ) printf("Rank %d failed MPI_Win_post\n", rank); r = MPI_Win_start(group, 0, win); if ( MPI_SUCCESS TEST_OP r ) printf("Rank %d failed MPI_Win_start\n", rank); r = MPI_Win_lock(MPI_LOCK_SHARED, target_rank, 0, win); if ( MPI_SUCCESS TEST_OP r ) printf("Rank %d failed MPI_Win_lock\n", rank); /* MPI_Put(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win) */ r = MPI_Put(base, WIN_SIZE, MPI_BYTE, target_rank, target_disp, WIN_SIZE, MPI_BYTE, win); if ( MPI_SUCCESS TEST_OP r ) 
printf("Rank %d failed MPI_Put\n", rank); r = MPI_Win_unlock(target_rank, win); if ( MPI_SUCCESS TEST_OP r ) printf("Rank %d failed MPI_Win_unlock\n", rank); /* MPI_Get(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Win win); */ r = MPI_Get(base, WIN_SIZE, MPI_BYTE, target_rank, target_disp, WIN_SIZE, MPI_BYTE, win); if ( MPI_SUCCESS TEST_OP r ) printf("Rank %d failed MPI_Get\n", rank); r = MPI_Win_complete(win); if ( MPI_SUCCESS TEST_OP r ) printf("Rank %d failed MPI_Win_complete\n", rank); r = MPI_Win_test(win, &flag); if ( MPI_SUCCESS TEST_OP r ) printf("Rank %d failed MPI_Win_test\n", rank); r = MPI_Win_wait(win); if ( MPI_SUCCESS TEST_OP r ) printf("Rank %d failed MPI_Win_wait\n", rank); /*************************************************************************/ /* Second epoch: Tests Accumulate and Fence */ /*************************************************************************/ r = MPI_Win_fence(0, win); if ( MPI_SUCCESS TEST_OP r ) printf("Rank %d failed MPI_Win_fence\n", rank); if ( rank == 0 ) { /* MPI_Accumulate(void *origin_addr, int origin_count, MPI_Datatype origin_datatype, int target_rank, MPI_Aint target_disp, int target_count, MPI_Datatype target_datatype, MPI_Op op, MPI_Win win) */ r = MPI_Accumulate(base, WIN_SIZE, MPI_BYTE, 0, target_disp, WIN_SIZE, MPI_BYTE, MPI_SUM, win); if ( MPI_SUCCESS TEST_OP r ) printf("Rank %d failed MPI_Accumulate\n", rank); } r = MPI_Win_fence(0, win); if ( MPI_SUCCESS TEST_OP r ) printf("Rank %d failed MPI_Win_fence\n", rank); /*************************************************************/ /* Win_free and Finalize */ /*************************************************************/ r = MPI_Win_free(&win); if ( MPI_SUCCESS TEST_OP r ) printf("Rank %d failed MPI_Win_free\n", rank); free(base); MPI_Finalize(); }
int main( int argc, char *argv[] ) { int errs = 0, err; int rank, size, source, dest; int minsize = 2, count; MPI_Comm comm; MPI_Win win; MPI_Aint extent; MPI_Group wingroup, neighbors; MTestDatatype sendtype, recvtype; MTest_Init( &argc, &argv ); /* The following illustrates the use of the routines to run through a selection of communicators and datatypes. Use subsets of these for tests that do not involve combinations of communicators, datatypes, and counts of datatypes */ while (MTestGetIntracommGeneral( &comm, minsize, 1 )) { if (comm == MPI_COMM_NULL) continue; /* Determine the sender and receiver */ MPI_Comm_rank( comm, &rank ); MPI_Comm_size( comm, &size ); source = 0; dest = size - 1; for (count = 1; count < 65000; count = count * 2) { while (MTestGetDatatypes( &sendtype, &recvtype, count )) { /* Make sure that everyone has a recv buffer */ recvtype.InitBuf( &recvtype ); MPI_Type_extent( recvtype.datatype, &extent ); MPI_Win_create( recvtype.buf, recvtype.count * extent, (int)extent, MPI_INFO_NULL, comm, &win ); MPI_Win_get_group( win, &wingroup ); if (rank == source) { /* To improve reporting of problems about operations, we change the error handler to errors return */ MPI_Win_set_errhandler( win, MPI_ERRORS_RETURN ); sendtype.InitBuf( &sendtype ); /* Neighbor is dest only */ MPI_Group_incl( wingroup, 1, &dest, &neighbors ); err = MPI_Win_start( neighbors, 0, win ); if (err) { errs++; if (errs < 10) { MTestPrintError( err ); } } MPI_Group_free( &neighbors ); err = MPI_Put( sendtype.buf, sendtype.count, sendtype.datatype, dest, 0, recvtype.count, recvtype.datatype, win ); if (err) { errs++; MTestPrintError( err ); } err = MPI_Win_complete( win ); if (err) { errs++; if (errs < 10) { MTestPrintError( err ); } } } else if (rank == dest) { MPI_Group_incl( wingroup, 1, &source, &neighbors ); MPI_Win_post( neighbors, 0, win ); MPI_Group_free( &neighbors ); MPI_Win_wait( win ); /* This should have the same effect, in terms of transfering data, as a send/recv pair 
*/ err = MTestCheckRecv( 0, &recvtype ); if (err) { errs += errs; } } else { /* Nothing; the other processes need not call any MPI routines */ ; } MPI_Win_free( &win ); MTestFreeDatatype( &sendtype ); MTestFreeDatatype( &recvtype ); MPI_Group_free( &wingroup ); } } MTestFreeComm( &comm ); } MTest_Finalize( errs ); MPI_Finalize(); return 0; }
/* Fortran binding for MPI_Win_get_group: converts the Fortran window
 * handle (*v1) to a C MPI_Win and writes the resulting group handle
 * through v2.  FIX: cast v2 to MPI_Group* — the C routine's output
 * parameter is MPI_Group*, not MPI_Fint*; passing v2 uncast is a type
 * mismatch whenever the two types differ.  The cast assumes MPI_Group
 * and MPI_Fint are layout-compatible integer handles, as in MPICH's
 * generated Fortran bindings — confirm against this implementation's
 * handle representation. */
FORT_DLL_SPEC void FORT_CALL mpi_win_get_group_ ( MPI_Fint *v1, MPI_Fint *v2, MPI_Fint *ierr ){
    *ierr = MPI_Win_get_group( (MPI_Win)(*v1), (MPI_Group *)(v2) );
}