/*
 * Fortran binding for MPI_FILE_READ_ALL_END.
 *
 * Translates the Fortran file handle, forwards the call, and — only when
 * Fortran INTEGER and C int differ in size — copies the C status back into
 * the caller's Fortran status array on success.
 */
void mpi_file_read_all_end_f(MPI_Fint *fh, char *buf, MPI_Fint *status,
                             MPI_Fint *ierr)
{
    MPI_Status *c_status;
#if OMPI_SIZEOF_FORTRAN_INTEGER != SIZEOF_INT
    MPI_Status c_status2;      /* translation buffer when sizes differ */
#endif
    MPI_File c_fh = MPI_File_f2c(*fh);

    if (OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
        /* Caller passed MPI_STATUS_IGNORE -- propagate it unchanged. */
        c_status = MPI_STATUS_IGNORE;
    } else {
#if OMPI_SIZEOF_FORTRAN_INTEGER == SIZEOF_INT
        /* Same sizes: let the C routine write directly into the
           Fortran status -- no translation needed. */
        c_status = (MPI_Status *) status;
#else
        /* Different sizes: use a local status and translate afterwards. */
        c_status = &c_status2;
#endif
    }

    *ierr = OMPI_INT_2_FINT(MPI_File_read_all_end(c_fh, buf, c_status));

#if OMPI_SIZEOF_FORTRAN_INTEGER != SIZEOF_INT
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr) &&
        MPI_STATUS_IGNORE != c_status) {
        MPI_Status_c2f(c_status, status);
    }
#endif
}
/*
 * Fortran binding for MPI_SENDRECV.
 *
 * Converts all Fortran handles/integers to their C equivalents, invokes
 * MPI_Sendrecv, and on success translates the C status back unless the
 * caller passed MPI_STATUS_IGNORE.
 */
void mpi_sendrecv_f(char *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
                    MPI_Fint *dest, MPI_Fint *sendtag, char *recvbuf,
                    MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *source,
                    MPI_Fint *recvtag, MPI_Fint *comm, MPI_Fint *status,
                    MPI_Fint *ierr)
{
    MPI_Comm c_comm;
    MPI_Datatype c_sendtype = MPI_Type_f2c(*sendtype);
    MPI_Datatype c_recvtype = MPI_Type_f2c(*recvtype);
    MPI_Status c_status;

    c_comm = MPI_Comm_f2c (*comm);

    /* BUG FIX: *recvcount was previously passed as a raw MPI_Fint.  It must
       go through OMPI_FINT_2_INT like sendcount, otherwise the value is
       wrong whenever Fortran INTEGER and C int differ in size. */
    *ierr = OMPI_INT_2_FINT(MPI_Sendrecv(OMPI_F2C_BOTTOM(sendbuf),
                                         OMPI_FINT_2_INT(*sendcount),
                                         c_sendtype,
                                         OMPI_FINT_2_INT(*dest),
                                         OMPI_FINT_2_INT(*sendtag),
                                         OMPI_F2C_BOTTOM(recvbuf),
                                         OMPI_FINT_2_INT(*recvcount),
                                         c_recvtype,
                                         OMPI_FINT_2_INT(*source),
                                         OMPI_FINT_2_INT(*recvtag),
                                         c_comm, &c_status));

    /* Only copy the status back on success and when not ignored. */
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr) &&
        !OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
        MPI_Status_c2f(&c_status, status);
    }
}
/*
 * IPM profiling wrapper TEMPLATE.  The __FRET__/__FFNAME__/__FPARAMS__/
 * __FARGS__/__CFNAME__/__F2CARGS__ tokens are substituted by the IPM
 * wrapper generator; this is not compilable C as-is.  The generated
 * wrapper timestamps the underlying PMPI call (p__FFNAME__), then, when
 * profiling is active, converts any C output objects (status, request,
 * communicator, group) back to Fortran handles and logs the event.
 */
__FRET__ __FFNAME__(__FPARAMS__)
{
  double tstart, tstop;        /* wall-clock stamps bracketing the PMPI call */
#if HAVE_CREQ /* HAVE _CREQ */
  MPI_Request creq;            /* C request, converted back via MPI_Request_c2f */
#endif
#if HAVE_CSTAT /* HAVE _CSTAT */
  MPI_Status cstat;            /* C status, converted back via MPI_Status_c2f */
#endif
#if HAVE_CCOMM_OUT
  MPI_Comm ccomm_out;          /* output communicator (e.g. from comm create calls) */
#endif /* HAVE _CCOMM_OUT */
#if HAVE_CCOMM_INOUT
  MPI_Comm ccomm_inout;        /* in/out communicator: converted in both directions */
#endif /* HAVE _CCOMM_INOUT */
#if HAVE_CCOMM_INOUT
  /* Convert the incoming Fortran communicator before the call. */
  ccomm_inout = MPI_Comm_f2c(*comm_inout);
#endif
#if HAVE_CGROUP_OUT /* HAVE _CGROUP_OUT */
  MPI_Group cgroup_out;        /* output group handle */
#endif

  IPM_TIMESTAMP(tstart);
  p__FFNAME__(__FARGS__);      /* the actual PMPI call */
  IPM_TIMESTAMP(tstop);

  /* Skip all conversion/logging when IPM is not collecting. */
  if( ipm_state!=STATE_ACTIVE ) { return; }

  /* Each output object is translated back only on MPI_SUCCESS. */
#if HAVE_CSTAT /* HAVE_CSTAT */
  if (*info==MPI_SUCCESS) MPI_Status_c2f(&cstat, status);
#endif
#if HAVE_CREQ /* HAVE_CREQ */
  if( *info==MPI_SUCCESS ) *req=MPI_Request_c2f(creq);
#endif
#if HAVE_CCOMM_OUT /* HAVE _CCOMM_OUT */
  if( *info==MPI_SUCCESS ) *comm_out=MPI_Comm_c2f(ccomm_out);
#endif
#if HAVE_CCOMM_INOUT /* HAVE _CCOMM_INOUT */
  if( *info==MPI_SUCCESS ) *comm_inout=MPI_Comm_c2f(ccomm_inout);
#endif
#if HAVE_CGROUP_OUT /* HAVE _CGROUP_OUT */
  if( *info==MPI_SUCCESS ) *group_out=MPI_Group_c2f(cgroup_out);
#endif

  /* Record the timed event with IPM. */
  IPM___CFNAME__(__F2CARGS__, tstart, tstop);
}
/*
 * Fortran binding for MPI_WAITALL.
 *
 * Builds temporary C request and status arrays in a single heap block,
 * waits on all requests, then writes the updated Fortran request handles
 * and (unless ignored) the translated statuses back on success.
 */
void mpi_waitall_f(MPI_Fint *count, MPI_Fint *array_of_requests,
                   MPI_Fint *array_of_statuses, MPI_Fint *ierr)
{
    const int n = OMPI_FINT_2_INT(*count);   /* hoisted: *count is not modified */
    MPI_Request *c_req;
    MPI_Status *c_status;
    int i;

    /* One allocation carries both arrays: requests first, statuses after. */
    c_req = (MPI_Request *) malloc(n * (sizeof(MPI_Request) + sizeof(MPI_Status)));
    if (NULL == c_req) {
        *ierr = OMPI_INT_2_FINT(OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD,
                                                       MPI_ERR_NO_MEM,
                                                       FUNC_NAME));
        return;
    }
    c_status = (MPI_Status *) (c_req + n);

    for (i = 0; i < n; ++i) {
        c_req[i] = MPI_Request_f2c(array_of_requests[i]);
    }

    *ierr = OMPI_INT_2_FINT(MPI_Waitall(n, c_req, c_status));

    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
        for (i = 0; i < n; ++i) {
            /* Completed requests get their (possibly updated) f2c index back. */
            array_of_requests[i] = c_req[i]->req_f_to_c_index;
            if (!OMPI_IS_FORTRAN_STATUSES_IGNORE(array_of_statuses) &&
                !OMPI_IS_FORTRAN_STATUS_IGNORE(&array_of_statuses[i])) {
                /* Fortran statuses are spaced sizeof(MPI_Status)/sizeof(int)
                   integers apart. */
                MPI_Status_c2f(&c_status[i],
                               &array_of_statuses[i * (sizeof(MPI_Status) / sizeof(int))]);
            }
        }
    }
    free(c_req);
}
/*
 * Fortran binding for MPI_PROBE.
 *
 * Probes with converted arguments, then copies the C status out to the
 * Fortran status array on success.
 */
void mpi_probe (MPI_Fint *source, MPI_Fint *tag, MPI_Fint *comm,
                MPI_Fint *status, MPI_Fint *__ierr)
{
    MPI_Status c_status;

    *__ierr = MPI_Probe ((int) *source, (int) *tag, MPI_Comm_f2c (*comm),
                         &c_status );
    /* FIX: previously the status was converted unconditionally, copying an
       uninitialized c_status when MPI_Probe failed (with an error-returning
       handler).  Convert only on success, matching the other wrappers. */
    if (*__ierr == MPI_SUCCESS) {
        MPI_Status_c2f (&c_status, status);
    }
}
/*
 * Fortran binding for MPI_RECV.
 *
 * Receives into buf using converted handles; a local C status is used so
 * the binding is correct even when MPI_Fint and int differ in size.
 */
void mpi_recv (void *buf, MPI_Fint *count, MPI_Fint *datatype,
               MPI_Fint *source, MPI_Fint *tag, MPI_Fint *comm,
               MPI_Fint *status, MPI_Fint *__ierr)
{
    MPI_Status s;  /* local status: MPI_Fint and int may be different sizes */

    *__ierr = MPI_Recv (buf, *count, MPI_Type_f2c (*datatype), *source,
                        *tag, MPI_Comm_f2c (*comm), &s);
    /* FIX: convert the status only on success; on failure s is
       uninitialized and copying it out reads indeterminate memory. */
    if (*__ierr == MPI_SUCCESS) {
        MPI_Status_c2f (&s, status);
    }
}
/*
 * Fortran binding for MPI_WAIT.
 *
 * Converts the request handle, waits, writes the (possibly nulled)
 * request handle back, and copies the status out on success.
 */
void mpi_wait (MPI_Fint *request, MPI_Fint *status, MPI_Fint *__ierr)
{
    MPI_Request lrequest;
    MPI_Status c_status;

    lrequest = MPI_Request_f2c (*request);
    *__ierr = MPI_Wait (&lrequest, &c_status);
    *request = MPI_Request_c2f (lrequest);
    /* FIX: guard the status conversion on success (as the MPICH-style
       mpi_wait_ wrapper does); on failure c_status is uninitialized. */
    if (*__ierr == MPI_SUCCESS) {
        MPI_Status_c2f (&c_status, status);
    }
}
/*
 * MPICH-style Fortran binding for MPI_WAIT.
 *
 * Waits on the converted request, stores the updated Fortran handle, and
 * translates the status only when the wait succeeded.
 */
FORTRAN_API void FORT_CALL mpi_wait_ ( MPI_Fint *request, MPI_Fint *status,
                                       MPI_Fint *__ierr )
{
    MPI_Status c_status;
    MPI_Request lrequest = MPI_Request_f2c(*request);

    *__ierr = MPI_Wait(&lrequest, &c_status);
    /* The request may have been freed/reset; hand back its new handle. */
    *request = MPI_Request_c2f(lrequest);

    if (*__ierr == MPI_SUCCESS) {
        MPI_Status_c2f(&c_status, status);
    }
}
/*
 * Fortran binding for MPI_SENDRECV.
 *
 * Forwards the combined send/receive with converted handles and copies
 * the receive status out on success.
 */
void mpi_sendrecv (void *sendbuf, MPI_Fint *sendcount, MPI_Fint *sendtype,
                   MPI_Fint *dest, MPI_Fint *sendtag, void *recvbuf,
                   MPI_Fint *recvcount, MPI_Fint *recvtype, MPI_Fint *source,
                   MPI_Fint *recvtag, MPI_Fint *comm, MPI_Fint *status,
                   MPI_Fint *__ierr)
{
    MPI_Status s;

    *__ierr = MPI_Sendrecv (sendbuf, *sendcount, MPI_Type_f2c (*sendtype),
                            *dest, *sendtag, recvbuf, *recvcount,
                            MPI_Type_f2c (*recvtype), *source, *recvtag,
                            MPI_Comm_f2c (*comm), &s);
    /* FIX: convert the status only on success; on failure s is
       uninitialized and copying it out reads indeterminate memory. */
    if (*__ierr == MPI_SUCCESS) {
        MPI_Status_c2f (&s, status);
    }
}
/*
 * Open MPI Fortran binding for MPI_WAIT.
 *
 * On success, stores the request's updated f2c index back into the
 * Fortran handle and translates the status unless the caller passed
 * MPI_STATUS_IGNORE.  The ierr pointer may be NULL.
 */
void ompi_wait_f(MPI_Fint *request, MPI_Fint *status, MPI_Fint *ierr)
{
    MPI_Status c_status;
    MPI_Request c_req = MPI_Request_f2c(*request);
    int rc;

    rc = MPI_Wait(&c_req, &c_status);
    if (NULL != ierr) {
        *ierr = OMPI_INT_2_FINT(rc);
    }

    if (MPI_SUCCESS == rc) {
        *request = OMPI_INT_2_FINT(c_req->req_f_to_c_index);
        if (!OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
            MPI_Status_c2f(&c_status, status);
        }
    }
}
/*
 * Fortran binding for MPI_IPROBE.
 *
 * Probes without blocking; writes the flag back and, only when a message
 * was found, translates the status into the Fortran status array.
 */
void mpi_iprobe (MPI_Fint *source, MPI_Fint *tag, MPI_Fint *comm,
                 MPI_Fint *flag, MPI_Fint *status, MPI_Fint *__ierr)
{
    MPI_Status c_status;
    int cflag;

    *__ierr = MPI_Iprobe ((int) *source, (int) *tag, MPI_Comm_f2c (*comm),
                          &cflag, &c_status );

    /*
    ** ASSUMPTION: the C truth value (0=false, non-zero=true) maps properly
    ** to a Fortran logical.  Verified on gfortran, Cray, Intel, PGI and
    ** Pathscale compilers.
    */
    *flag = (MPI_Fint) cflag;

    /* Status contents are only defined when the probe found a message. */
    if (cflag) {
        MPI_Status_c2f (&c_status, status);
    }
}
/*
** mpi_waitall was simplified from the FPMPI version.
** This one has a hard limit of LOCAL_ARRAY_SIZE requests.
** If this limit is exceeded, MPI_Abort is called. There is probably
** a better solution.
*/
void mpi_waitall (MPI_Fint *count, MPI_Fint array_of_requests[],
                  MPI_Fint array_of_statuses[][MPI_STATUS_SIZE],
                  MPI_Fint *__ierr)
{
  const int LOCAL_ARRAY_SIZE = 128;
  int i;
  MPI_Request lrequest[LOCAL_ARRAY_SIZE];
  MPI_Status c_status[LOCAL_ARRAY_SIZE];
  static const char *thisfunc = "GPTL's mpi_waitall";

  /* Sanity-check the build-time assumption that a Fortran status is
     MPI_STATUS_SIZE ints, i.e. the same size as a C MPI_Status. */
  if (MPI_STATUS_SIZE != sizeof(MPI_Status)/sizeof(int)) {
    /* Warning - */
    fprintf (stderr, "%s ERROR: mpi_waitall expected sizeof MPI_Status\n"
             "to be %d integers but it is %d. Rebuild GPTL after ensuring that the\n"
             "correct value is found and set in macros.make\n",
             thisfunc, MPI_STATUS_SIZE, (int) (sizeof(MPI_Status)/sizeof(int)) );
    fprintf (stderr, "Aborting...\n");
    (void) MPI_Abort (MPI_COMM_WORLD, -1);
  }

  /* fpmpi does mallocs. Instead used fixed array sizes and Abort if too many */
  if ((int) *count > LOCAL_ARRAY_SIZE) {
    fprintf (stderr, "mpi_waitall: %d is too many requests: recompile f_wrappers_pmpi.c "
             "with LOCAL_ARRAY_SIZE > %d\n", (int)*count, LOCAL_ARRAY_SIZE);
    fprintf (stderr, "Aborting...\n");
    (void) MPI_Abort (MPI_COMM_WORLD, -1);
  }

  if ((int) *count > 0) {
    for (i = 0; i < (int) *count; i++) {
      lrequest[i] = MPI_Request_f2c (array_of_requests[i]);
    }
    *__ierr = MPI_Waitall ((int)*count, lrequest, c_status);
    /* By checking for lrequest[i] = 0, we handle persistent requests */
    for (i = 0; i < (int)*count; i++) {
      array_of_requests[i] = MPI_Request_c2f (lrequest[i]);
    }
  } else {
    *__ierr = MPI_Waitall ((int)*count, (MPI_Request *)0, c_status);
  }

  /* FIX: convert statuses only on success.  The MPICH-style mpi_waitall_
     wrapper guards this loop the same way; on failure the c_status entries
     may be uninitialized. */
  if (*__ierr == MPI_SUCCESS) {
    for (i = 0; i < (int)*count; i++)
      MPI_Status_c2f (&(c_status[i]), &(array_of_statuses[i][0]));
  }
}
/*
 * MPICH-style Fortran binding for MPI_WAITALL.
 *
 * Uses stack arrays for up to MPIR_USE_LOCAL_ARRAY requests and falls
 * back to heap allocation (via MPIR_FALLOC, which reports
 * MPI_ERR_EXHAUSTED on failure) for larger counts.  Statuses are
 * converted back to Fortran form only when the wait succeeds.
 */
FORTRAN_API void FORT_CALL mpi_waitall_(MPI_Fint *count, MPI_Fint array_of_requests[], MPI_Fint array_of_statuses[][MPI_STATUS_SIZE], MPI_Fint *__ierr )
{
    int i;
    MPI_Request *lrequest = 0;
    MPI_Request local_lrequest[MPIR_USE_LOCAL_ARRAY];   /* small-count fast path */
    MPI_Status *c_status = 0;
    MPI_Status local_c_status[MPIR_USE_LOCAL_ARRAY];

    if ((int)*count > 0) {
        if ((int)*count > MPIR_USE_LOCAL_ARRAY) {
            /* Too many requests for the stack arrays: allocate on the heap. */
            MPIR_FALLOC(lrequest,(MPI_Request*)MALLOC(sizeof(MPI_Request) * (int)*count),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_WAITALL" );
            MPIR_FALLOC(c_status,(MPI_Status*)MALLOC(sizeof(MPI_Status) * (int)*count),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_WAITALL" );
        }
        else {
            lrequest = local_lrequest;
            c_status = local_c_status;
        }

        /* Convert the Fortran request handles to C requests. */
        for (i=0; i<(int)*count; i++) {
            lrequest[i] = MPI_Request_f2c( array_of_requests[i] );
        }
        *__ierr = MPI_Waitall((int)*count,lrequest,c_status);

        /* By checking for lrequest[i] = 0, we handle persistent requests */
        for (i=0; i<(int)*count; i++) {
            array_of_requests[i] = MPI_Request_c2f( lrequest[i] );
        }
    }
    else
        /* Zero requests: still call MPI_Waitall for its error semantics. */
        *__ierr = MPI_Waitall((int)*count,(MPI_Request *)0, c_status );

    /* Only translate statuses back when the wait succeeded. */
    if (*__ierr == MPI_SUCCESS)
        for (i=0; i<(int)*count; i++)
            MPI_Status_c2f(&(c_status[i]), &(array_of_statuses[i][0]) );

    if ((int)*count > MPIR_USE_LOCAL_ARRAY) {
        FREE( lrequest );
        FREE( c_status );
    }
}
/*
 * Open MPI Fortran binding for MPI_IPROBE.
 *
 * Translates the Fortran logical flag and the status (only when Fortran
 * INTEGER and C int differ in size); the ierr pointer may be NULL.
 */
void ompi_iprobe_f(MPI_Fint *source, MPI_Fint *tag, MPI_Fint *comm,
                   ompi_fortran_logical_t *flag, MPI_Fint *status,
                   MPI_Fint *ierr)
{
    int rc;
    MPI_Status *c_status;
    MPI_Comm c_comm;
#if OMPI_SIZEOF_FORTRAN_INTEGER != SIZEOF_INT
    MPI_Status c_status2;         /* translation buffer when sizes differ */
#endif
    OMPI_LOGICAL_NAME_DECL(flag);

    c_comm = MPI_Comm_f2c (*comm);

    if (OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
        /* Caller passed MPI_STATUS_IGNORE -- propagate it unchanged. */
        c_status = MPI_STATUS_IGNORE;
    } else {
#if OMPI_SIZEOF_FORTRAN_INTEGER == SIZEOF_INT
        /* Same sizes: the C routine can write directly into the
           Fortran status. */
        c_status = (MPI_Status *) status;
#else
        /* Different sizes: use a local status and translate afterwards. */
        c_status = &c_status2;
#endif
    }

    rc = MPI_Iprobe(OMPI_FINT_2_INT(*source),
                    OMPI_FINT_2_INT(*tag),
                    c_comm,
                    OMPI_LOGICAL_SINGLE_NAME_CONVERT(flag),
                    c_status);
    if (NULL != ierr) {
        *ierr = OMPI_INT_2_FINT(rc);
    }

    if (MPI_SUCCESS == rc) {
        OMPI_SINGLE_INT_2_LOGICAL(flag);
#if OMPI_SIZEOF_FORTRAN_INTEGER != SIZEOF_INT
        if (MPI_STATUS_IGNORE != c_status) {
            MPI_Status_c2f(c_status, status);
        }
#endif
    }
}
/*
 * Fortran binding for MPI_TESTANY.
 *
 * Builds a temporary C request array, tests it, and on success converts
 * the index to Fortran's 1-based convention, stores the completed
 * request's updated handle, and translates the status unless ignored.
 * NOTE: the OMPI_SINGLE_* macros expand differently depending on whether
 * MPI_Fint and int are the same size; the ordering below is deliberate.
 */
void mpi_testany_f(MPI_Fint *count, MPI_Fint *array_of_requests,
                   MPI_Fint *index, ompi_fortran_logical_t *flag,
                   MPI_Fint *status, MPI_Fint *ierr)
{
    MPI_Request *c_req;
    MPI_Status c_status;
    int i;
    OMPI_LOGICAL_NAME_DECL(flag);   /* C int shadow for the Fortran logical */
    OMPI_SINGLE_NAME_DECL(index);   /* C int shadow for the Fortran index */

    c_req = (MPI_Request *) malloc(OMPI_FINT_2_INT(*count) * sizeof(MPI_Request));
    if (c_req == NULL) {
        /* Allocation failed: invoke the error handler with MPI_ERR_NO_MEM. */
        *ierr = OMPI_INT_2_FINT(OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD,
                                                       MPI_ERR_NO_MEM,
                                                       FUNC_NAME));
        return;
    }

    /* Convert the Fortran request handles to C requests. */
    for (i = 0; i < OMPI_FINT_2_INT(*count); ++i) {
        c_req[i] = MPI_Request_f2c(array_of_requests[i]);
    }

    *ierr = OMPI_INT_2_FINT(MPI_Testany(OMPI_FINT_2_INT(*count), c_req,
                                        OMPI_SINGLE_NAME_CONVERT(index),
                                        OMPI_LOGICAL_SINGLE_NAME_CONVERT(flag),
                                        &c_status));
    OMPI_SINGLE_INT_2_LOGICAL(flag);

    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
        /* Increment index by one for fortran conventions */
        OMPI_SINGLE_INT_2_FINT(index);
        if (*flag && MPI_UNDEFINED != *(OMPI_SINGLE_NAME_CONVERT(index))) {
            /* A request completed: write back its updated f2c handle
               (still using the 0-based index), then shift to 1-based. */
            array_of_requests[OMPI_INT_2_FINT(*index)] =
                c_req[OMPI_INT_2_FINT(*index)]->req_f_to_c_index;
            ++(*index);
        }
        if (!OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
            MPI_Status_c2f(&c_status, status);
        }
    }
    free(c_req);
}
/*
 * Fortran binding for MPI_STATUS_SET_CANCELLED.
 *
 * Round-trips the Fortran status through a C status, sets the cancelled
 * bit, and writes the status back on success.  MPI_STATUS_IGNORE is a
 * successful no-op.
 */
void mpi_status_set_cancelled_f(MPI_Fint *status, ompi_fortran_logical_t *flag,
                                MPI_Fint *ierr)
{
    MPI_Status c_status;

    /* This seems silly, but someone will do it */
    if (OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
        *ierr = OMPI_INT_2_FINT(MPI_SUCCESS);
        return;
    }

    MPI_Status_f2c( status, &c_status );
    *ierr = OMPI_INT_2_FINT(MPI_Status_set_cancelled(&c_status,
                                                     OMPI_LOGICAL_2_INT(*flag)));
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
        MPI_Status_c2f(&c_status, status);
    }
}
/*
 * Fortran binding for MPI_TEST.
 *
 * Tests the converted request; on success writes the request's updated
 * f2c index back and translates the status unless the caller passed
 * MPI_STATUS_IGNORE.
 */
void mpi_test_f(MPI_Fint *request, MPI_Flogical *flag, MPI_Fint *status,
                MPI_Fint *ierr)
{
    MPI_Status c_status;
    MPI_Request c_req = MPI_Request_f2c(*request);
    OMPI_LOGICAL_NAME_DECL(flag);

    *ierr = OMPI_INT_2_FINT(MPI_Test(&c_req,
                                     OMPI_LOGICAL_SINGLE_NAME_CONVERT(flag),
                                     &c_status));
    OMPI_SINGLE_INT_2_LOGICAL(flag);

    if (MPI_SUCCESS != OMPI_FINT_2_INT(*ierr)) {
        return;
    }
    *request = OMPI_INT_2_FINT(c_req->req_f_to_c_index);
    if (!OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
        MPI_Status_c2f(&c_status, status);
    }
}
/*
 * Fortran binding for MPI_REQUEST_GET_STATUS.
 *
 * Non-destructive completion check: the request handle is never
 * modified.  With MPI_STATUS_IGNORE the call succeeds with flag=false.
 */
void mpi_request_get_status_f(MPI_Fint *request, ompi_fortran_logical_t *flag,
                              MPI_Fint *status, MPI_Fint *ierr)
{
    MPI_Status c_status;
    MPI_Request c_req = MPI_Request_f2c( *request );
    OMPI_LOGICAL_NAME_DECL(flag);

    /* This seems silly, but someone will do it */
    if (OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
        *flag = OMPI_INT_2_LOGICAL(0);
        *ierr = OMPI_INT_2_FINT(MPI_SUCCESS);
        return;
    }

    *ierr = OMPI_INT_2_FINT(MPI_Request_get_status(c_req,
                                                   OMPI_LOGICAL_SINGLE_NAME_CONVERT(flag),
                                                   &c_status));
    OMPI_SINGLE_INT_2_LOGICAL(flag);
    MPI_Status_c2f( &c_status, status );
}
/*
 * Fortran binding for MPI_TEST.
 *
 * Tests the converted request, writes the (possibly reset) request
 * handle back, sets the Fortran flag, and translates the status only
 * when the operation actually completed.
 */
void mpi_test (MPI_Fint *request, MPI_Fint *flag, MPI_Fint *status,
               MPI_Fint *__ierr )
{
    MPI_Status c_status;
    MPI_Request lrequest = MPI_Request_f2c (*request);
    int done;

    *__ierr = MPI_Test (&lrequest, &done, &c_status);
    *request = MPI_Request_c2f (lrequest); /* In case request is changed */

    /*
    ** ASSUMPTION: the C truth value (0=false, non-zero=true) maps properly
    ** to a Fortran logical.  Verified on gfortran, Cray, Intel, PGI and
    ** Pathscale compilers.
    */
    *flag = (MPI_Fint) done;

    /* Status contents are only defined when the request completed. */
    if (done) {
        MPI_Status_c2f (&c_status, status);
    }
}
/*
 * Open MPI Fortran binding for MPI_FILE_READ_ORDERED.
 *
 * Converts the file handle and datatype, reads, and translates the
 * status back only when Fortran INTEGER and C int differ in size.  The
 * ierr pointer may be NULL.
 */
void ompi_file_read_ordered_f(MPI_Fint *fh, char *buf, MPI_Fint *count,
                              MPI_Fint *datatype, MPI_Fint *status,
                              MPI_Fint *ierr)
{
    int rc;
    MPI_File c_fh = MPI_File_f2c(*fh);
    MPI_Datatype c_type = MPI_Type_f2c(*datatype);
    MPI_Status *c_status;
#if OMPI_SIZEOF_FORTRAN_INTEGER != SIZEOF_INT
    MPI_Status c_status2;      /* translation buffer when sizes differ */
#endif

    if (OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
        /* Caller passed MPI_STATUS_IGNORE -- propagate it unchanged. */
        c_status = MPI_STATUS_IGNORE;
    } else {
#if OMPI_SIZEOF_FORTRAN_INTEGER == SIZEOF_INT
        /* Same sizes: the C routine writes directly into the
           Fortran status. */
        c_status = (MPI_Status *) status;
#else
        /* Different sizes: use a local status and translate afterwards. */
        c_status = &c_status2;
#endif
    }

    rc = MPI_File_read_ordered(c_fh, OMPI_F2C_BOTTOM(buf),
                               OMPI_FINT_2_INT(*count), c_type, c_status);
    if (NULL != ierr) {
        *ierr = OMPI_INT_2_FINT(rc);
    }

#if OMPI_SIZEOF_FORTRAN_INTEGER != SIZEOF_INT
    if (MPI_SUCCESS == rc && MPI_STATUS_IGNORE != c_status) {
        MPI_Status_c2f(c_status, status);
    }
#endif
}
/*
 * Wrapper TEMPLATE: FRET/FFNAME/FPARAMS/CFNAME/F2CARGS are substituted
 * by a wrapper generator, so this is not compilable C as-is.  The
 * generated wrapper calls the C MPI routine and, on success, converts
 * any C output status/request back to Fortran handles.
 */
FRET FFNAME(FPARAMS)
{
#if HAVE_CREQ /* HAVE _CREQ */
  MPI_Request creq;    /* C request, converted back via MPI_Request_c2f */
#endif
#if HAVE_CSTAT /* HAVE _CSTAT */
  MPI_Status cstat;    /* C status, converted back via MPI_Status_c2f */
#endif

  /* Invoke the C routine; F2CARGS threads cstat/creq through as needed. */
  *info=CFNAME(F2CARGS);

#if HAVE_CSTAT /* HAVE _CSTAT */
  if (*info==MPI_SUCCESS) MPI_Status_c2f(&cstat, status);
#endif
#if HAVE_CREQ /* HAVE _CREQ */
  if( *info==MPI_SUCCESS ) *req=MPI_Request_c2f(creq);
#endif
}
/*
 * Open MPI Fortran binding for MPI_RECV.
 *
 * Converts handles and integer arguments, receives, and translates the
 * status back only when Fortran INTEGER and C int differ in size.
 */
void mpi_recv_f(char *buf, MPI_Fint *count, MPI_Fint *datatype,
                MPI_Fint *source, MPI_Fint *tag, MPI_Fint *comm,
                MPI_Fint *status, MPI_Fint *ierr)
{
    MPI_Status *c_status;
#if OMPI_SIZEOF_FORTRAN_INTEGER != SIZEOF_INT
    MPI_Status c_status2;      /* translation buffer when sizes differ */
#endif
    MPI_Comm c_comm = MPI_Comm_f2c(*comm);
    MPI_Datatype c_type = MPI_Type_f2c(*datatype);

    if (OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
        /* Caller passed MPI_STATUS_IGNORE -- propagate it unchanged. */
        c_status = MPI_STATUS_IGNORE;
    } else {
#if OMPI_SIZEOF_FORTRAN_INTEGER == SIZEOF_INT
        /* Same sizes: the C routine writes directly into the
           Fortran status. */
        c_status = (MPI_Status *) status;
#else
        /* Different sizes: use a local status and translate afterwards. */
        c_status = &c_status2;
#endif
    }

    /* Call the C function */
    *ierr = OMPI_INT_2_FINT(MPI_Recv(OMPI_F2C_BOTTOM(buf),
                                     OMPI_FINT_2_INT(*count),
                                     c_type,
                                     OMPI_FINT_2_INT(*source),
                                     OMPI_FINT_2_INT(*tag),
                                     c_comm, c_status));

#if OMPI_SIZEOF_FORTRAN_INTEGER != SIZEOF_INT
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr) &&
        MPI_STATUS_IGNORE != c_status) {
        MPI_Status_c2f(c_status, status);
    }
#endif
}
/*
 * Fortran binding for MPI_STATUS_SET_ELEMENTS.
 *
 * Round-trips the Fortran status through a C status, sets the element
 * count for the given datatype, and writes the status back on success.
 * MPI_STATUS_IGNORE is a successful no-op.
 */
void mpi_status_set_elements_f(MPI_Fint *status, MPI_Fint *datatype,
                               MPI_Fint *count, MPI_Fint *ierr)
{
    MPI_Datatype c_type = MPI_Type_f2c(*datatype);
    MPI_Status c_status;

    /* This seems silly, but someone will do it */
    if (OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
        *ierr = OMPI_INT_2_FINT(MPI_SUCCESS);
        return;
    }

    MPI_Status_f2c( status, &c_status );
    *ierr = OMPI_INT_2_FINT(MPI_Status_set_elements(&c_status, c_type,
                                                    OMPI_FINT_2_INT(*count)));

    /* If datatype is really being set, then that needs to be converted.... */
    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
        MPI_Status_c2f(&c_status, status);
    }
}
/*
 * Open MPI Fortran binding for MPI_STATUS_SET_ELEMENTS_X (MPI_Count
 * element count).  MPI_STATUS_IGNORE is a successful no-op; the ierr
 * pointer may be NULL.
 */
void ompi_status_set_elements_x_f(MPI_Fint *status, MPI_Fint *datatype,
                                  MPI_Count *count, MPI_Fint *ierr)
{
    int rc = MPI_SUCCESS;
    MPI_Datatype c_type = MPI_Type_f2c(*datatype);
    MPI_Status c_status;

    /* This seems silly, but someone will do it */
    if (!OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
        MPI_Status_f2c( status, &c_status );
        rc = MPI_Status_set_elements_x(&c_status, c_type, *count);

        /* If datatype is really being set, then that needs to be converted.... */
        if (MPI_SUCCESS == rc) {
            MPI_Status_c2f(&c_status, status);
        }
    }

    if (NULL != ierr) {
        *ierr = OMPI_INT_2_FINT(rc);
    }
}
/*
 * MPICH-style Fortran binding for MPI_WAITSOME.
 *
 * Uses stack arrays for up to MPIR_USE_LOCAL_ARRAY requests and falls
 * back to heap allocation (via MPIR_FALLOC, which reports
 * MPI_ERR_EXHAUSTED on failure) for larger counts.  Completed requests
 * get their updated handles written back; indices are converted to
 * Fortran's 1-based convention and statuses translated only on success.
 */
FORTRAN_API void FORT_CALL mpi_waitsome_( MPI_Fint *incount, MPI_Fint array_of_requests[], MPI_Fint *outcount, MPI_Fint array_of_indices[], MPI_Fint array_of_statuses[][MPI_STATUS_SIZE], MPI_Fint *__ierr )
{
    int i,j,found;
    int loutcount;                                      /* C-side outcount */
    int *l_indices = 0;
    int local_l_indices[MPIR_USE_LOCAL_ARRAY];          /* small-count fast path */
    MPI_Request *lrequest = 0;
    MPI_Request local_lrequest[MPIR_USE_LOCAL_ARRAY];
    MPI_Status * c_status = 0;
    MPI_Status local_c_status[MPIR_USE_LOCAL_ARRAY];

    if ((int)*incount > 0) {
        if ((int)*incount > MPIR_USE_LOCAL_ARRAY) {
            /* Too many requests for the stack arrays: allocate on the heap. */
            MPIR_FALLOC(lrequest,(MPI_Request*)MALLOC(sizeof(MPI_Request)* (int)*incount),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_WAITSOME" );
            MPIR_FALLOC(l_indices,(int*)MALLOC(sizeof(int)* (int)*incount),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_WAITSOME" );
            MPIR_FALLOC(c_status,(MPI_Status*)MALLOC(sizeof(MPI_Status)* (int)*incount),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_WAITSOME" );
        }
        else {
            lrequest = local_lrequest;
            l_indices = local_l_indices;
            c_status = local_c_status;
        }

        /* Convert the Fortran request handles to C requests. */
        for (i=0; i<(int)*incount; i++)
            lrequest[i] = MPI_Request_f2c( array_of_requests[i] );

        *__ierr = MPI_Waitsome((int)*incount,lrequest,&loutcount,l_indices,
                               c_status);

        /* By checking for lrequest[l_indices[i]] = 0, we handle persistent
           requests.  Completed requests (first loutcount l_indices entries)
           are written back via their index; the remaining slots are scanned
           so that requests NOT in the completed set also get their handles
           refreshed. */
        for (i=0; i<(int)*incount; i++) {
            if ( i < loutcount) {
                if (l_indices[i] >= 0) {
                    array_of_requests[l_indices[i]] = MPI_Request_c2f( lrequest[l_indices[i]] );
                }
            }
            else {
                /* Is index i among the completed indices?  If not, refresh it. */
                found = 0;
                j = 0;
                while ( (!found) && (j<loutcount) ) {
                    if (l_indices[j++] == i) found = 1;
                }
                if (!found)
                    array_of_requests[i] = MPI_Request_c2f( lrequest[i] );
            }
        }
    }
    else
        /* Zero requests: still call MPI_Waitsome for its error semantics. */
        *__ierr = MPI_Waitsome( (int)*incount, (MPI_Request *)0, &loutcount,
                                l_indices, c_status );

    if (*__ierr != MPI_SUCCESS) return;

    /* Translate statuses and shift indices to Fortran's 1-based convention. */
    for (i=0; i<loutcount; i++) {
        MPI_Status_c2f( &c_status[i], &(array_of_statuses[i][0]) );
        if (l_indices[i] >= 0)
            array_of_indices[i] = l_indices[i] + 1;
    }
    *outcount = (MPI_Fint)loutcount;

    if ((int)*incount > MPIR_USE_LOCAL_ARRAY) {
        FREE( l_indices );
        FREE( lrequest );
        FREE( c_status
        );
    }
}