/*
 * Fortran wrapper for MPI_Waitall (Open MPI style).
 *
 * Converts the Fortran request handles to C, waits on all of them, then
 * writes the (possibly reset) handles and the statuses back to Fortran.
 *
 * count             - number of requests (Fortran INTEGER)
 * array_of_requests - Fortran request handles; refreshed on success
 * array_of_statuses - Fortran status array, or Fortran STATUSES_IGNORE
 * ierr              - Fortran error code out-parameter
 */
void mpi_waitall_f(MPI_Fint *count, MPI_Fint *array_of_requests,
                   MPI_Fint *array_of_statuses, MPI_Fint *ierr)
{
    MPI_Request *c_req;
    MPI_Status *c_status;
    int i;
    int c_count = OMPI_FINT_2_INT(*count);

    /* Bug fix: with a zero count the original code called malloc(0), which
       may legally return NULL and would then raise a spurious
       MPI_ERR_NO_MEM.  A zero-length waitall is valid and trivially
       complete, so handle it without allocating. */
    if (0 == c_count) {
        *ierr = OMPI_INT_2_FINT(MPI_Waitall(0, NULL, NULL));
        return;
    }

    /* One allocation carries both arrays: 'count' requests followed by
       'count' statuses. */
    c_req = (MPI_Request *) malloc(c_count *
                                   (sizeof(MPI_Request) + sizeof(MPI_Status)));
    if (NULL == c_req) {
        *ierr = OMPI_INT_2_FINT(OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD,
                                                       MPI_ERR_NO_MEM,
                                                       FUNC_NAME));
        return;
    }
    c_status = (MPI_Status *) (c_req + c_count);

    for (i = 0; i < c_count; ++i) {
        c_req[i] = MPI_Request_f2c(array_of_requests[i]);
    }

    *ierr = OMPI_INT_2_FINT(MPI_Waitall(c_count, c_req, c_status));

    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
        for (i = 0; i < c_count; ++i) {
            /* Propagate the post-wait handle (completed non-persistent
               requests become MPI_REQUEST_NULL) back to Fortran. */
            array_of_requests[i] = c_req[i]->req_f_to_c_index;
            if (!OMPI_IS_FORTRAN_STATUSES_IGNORE(array_of_statuses) &&
                !OMPI_IS_FORTRAN_STATUS_IGNORE(&array_of_statuses[i])) {
                /* Fortran statuses are arrays of MPI_STATUS_SIZE integers;
                   step by the C status size expressed in Fortran ints. */
                MPI_Status_c2f(&c_status[i],
                               &array_of_statuses[i * (sizeof(MPI_Status) /
                                                       sizeof(int))]);
            }
        }
    }
    free(c_req);
}
/* Fortran binding for MPI_Cancel: translate the Fortran request handle to
   its C form and request cancellation.  The Fortran handle itself is left
   untouched, as cancellation does not invalidate the request. */
EXPORT_MPI_API void FORTRAN_API mpi_cancel_( MPI_Fint *request, MPI_Fint *__ierr )
{
    MPI_Request c_request = MPI_Request_f2c(*request);

    *__ierr = MPI_Cancel(&c_request);
}
/*
 * Fortran wrapper for MPI_Wait: convert the Fortran handle to C, wait,
 * write the (possibly nulled) handle back, and translate the status.
 *
 * Bug fix: the status is now copied back only when MPI_Wait succeeded.
 * On failure c_status is uninitialized, so converting it read
 * indeterminate memory; the FORT_CALL mpi_wait_ wrapper in this file
 * already applies the same MPI_SUCCESS guard.
 */
void mpi_wait (MPI_Fint *request, MPI_Fint *status, MPI_Fint *__ierr)
{
    MPI_Request lrequest;
    MPI_Status c_status;

    lrequest = MPI_Request_f2c (*request);
    *__ierr = MPI_Wait (&lrequest, &c_status);
    *request = MPI_Request_c2f (lrequest);
    if (*__ierr == MPI_SUCCESS) {
        MPI_Status_c2f (&c_status, status);
    }
}
void mpif_recv_init_ (void*buffer, int*count, MPI_Fint*type, int*src, int*tag, MPI_Fint*comm, MPI_Fint *req, int*error) { MPI_Datatype c_type = MPI_Type_f2c(*type); MPI_Comm c_comm = MPI_Comm_f2c(*comm); MPI_Request c_req = MPI_Request_f2c(*req); *error = MPI_Recv_init_core(buffer, *count, c_type, *src, *tag, c_comm, &c_req); *req = MPI_Request_c2f(c_req); MPI_Recv_init_epilog(buffer, *count, c_type, *src, *tag, c_comm, req); }
/* Fortran interface for MPI_Wait: convert the handle, wait, store the
   updated handle back, and translate the status to Fortran form only on
   success (it is undefined when MPI_Wait fails). */
FORTRAN_API void FORT_CALL mpi_wait_ ( MPI_Fint *request, MPI_Fint *status,
                                       MPI_Fint *__ierr )
{
    MPI_Status tmp_status;
    MPI_Request c_request = MPI_Request_f2c(*request);

    *__ierr = MPI_Wait(&c_request, &tmp_status);
    *request = MPI_Request_c2f(c_request);
    if (MPI_SUCCESS == *__ierr) {
        MPI_Status_c2f(&tmp_status, status);
    }
}
/* Fortran wrapper for MPI_Request_free.  On success, the Fortran handle is
   reset to the f2c index of MPI_REQUEST_NULL so the caller's handle no
   longer refers to the freed request. */
void mpi_request_free_f(MPI_Fint *request, MPI_Fint *ierr)
{
    MPI_Request c_req = MPI_Request_f2c( *request );
    int rc = MPI_Request_free(&c_req);

    *ierr = OMPI_INT_2_FINT(rc);
    if (rc == MPI_SUCCESS) {
        *request = OMPI_INT_2_FINT(MPI_REQUEST_NULL->req_f_to_c_index);
    }
}
// MPI_WAIT(REQUEST, STATUS, IERROR) // INTEGER REQUEST, STATUS(MPI_STATUS_SIZE), IERROR // MPI_Request MPI_Request_f2c(MPI_Fint request) // MPI_Fint MPI_Request_c2f(MPI_Request request) void mpi_wait(MPI_Fint *request, MPI_Fint *status, int *ierr) { __SHIM__REGISTER_F(wait); caller = __SHIM__get_caller(); MPI_Request cb_request; cb_request = MPI_Request_f2c(*request); MPI_Status cb_status; MPI_Status_f2c(status, &cb_status); tor_MPI_Wait_pre(&cb_request,&cb_status,caller); __SHIM__FUNC_F(request,status,ierr); tor_MPI_Wait_pos(&cb_request,&cb_status,caller,*ierr); return; }
/* Test helper: round-trips a Fortran request handle through C.  Tests the
   request, then checks the resulting status reports the operation as
   cancelled.  Returns 0 and updates *request on success; returns 1 (with a
   message on stderr) when the cancelled flag is not set. */
MPI_Fint c2frequest_ ( MPI_Fint *request )
{
    MPI_Status status;
    int flag;
    MPI_Request c_req = MPI_Request_f2c( *request );

    MPI_Test( &c_req, &flag, &status );
    MPI_Test_cancelled( &status, &flag );
    if (!flag) {
        fprintf( stderr, "Request: Wrong value for flag\n" );
        return 1;
    }
    *request = MPI_Request_c2f( c_req );
    return 0;
}
/* Fortran wrapper for MPI_Wait (Open MPI style).  ierr may be NULL, in
   which case the error code is not stored.  On success the Fortran handle
   is refreshed from the request's f2c index, and the status is translated
   unless the caller passed Fortran STATUS_IGNORE. */
void ompi_wait_f(MPI_Fint *request, MPI_Fint *status, MPI_Fint *ierr)
{
    MPI_Status c_status;
    MPI_Request c_req = MPI_Request_f2c(*request);
    int rc = MPI_Wait(&c_req, &c_status);

    if (NULL != ierr) {
        *ierr = OMPI_INT_2_FINT(rc);
    }
    if (MPI_SUCCESS == rc) {
        *request = OMPI_INT_2_FINT(c_req->req_f_to_c_index);
        if (!OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
            MPI_Status_c2f(&c_status, status);
        }
    }
}
/*
** mpi_waitall was simplified from the FPMPI version.
** This one has a hard limit of LOCAL_ARRAY_SIZE requests.
** If this limit is exceeded, MPI_Abort is called. There is probably
** a better solution.
**
** Bug fix: the trailing status-translation loop now runs only when
** MPI_Waitall returned MPI_SUCCESS.  On error the entries of c_status are
** not guaranteed to be set, and the MPICH-style mpi_waitall_ wrapper in
** this file applies the same guard.
*/
void mpi_waitall (MPI_Fint *count, MPI_Fint array_of_requests[],
                  MPI_Fint array_of_statuses[][MPI_STATUS_SIZE],
                  MPI_Fint *__ierr)
{
  const int LOCAL_ARRAY_SIZE = 128;
  int i;
  MPI_Request lrequest[LOCAL_ARRAY_SIZE];
  MPI_Status c_status[LOCAL_ARRAY_SIZE];
  static const char *thisfunc = "GPTL's mpi_waitall";

  /* The Fortran status layout must match the C one integer-for-integer,
     or the status copy below would corrupt memory. */
  if (MPI_STATUS_SIZE != sizeof(MPI_Status)/sizeof(int)) {
    /* Warning - */
    fprintf (stderr, "%s ERROR: mpi_waitall expected sizeof MPI_Status\n"
             "to be %d integers but it is %d. Rebuild GPTL after ensuring that the\n"
             "correct value is found and set in macros.make\n",
             thisfunc, MPI_STATUS_SIZE, (int) (sizeof(MPI_Status)/sizeof(int)) );
    fprintf (stderr, "Aborting...\n");
    (void) MPI_Abort (MPI_COMM_WORLD, -1);
  }

  /* fpmpi does mallocs. Instead used fixed array sizes and Abort if too many */
  if ((int) *count > LOCAL_ARRAY_SIZE) {
    fprintf (stderr, "mpi_waitall: %d is too many requests: recompile f_wrappers_pmpi.c "
             "with LOCAL_ARRAY_SIZE > %d\n", (int)*count, LOCAL_ARRAY_SIZE);
    fprintf (stderr, "Aborting...\n");
    (void) MPI_Abort (MPI_COMM_WORLD, -1);
  }

  if ((int) *count > 0) {
    for (i = 0; i < (int) *count; i++) {
      lrequest[i] = MPI_Request_f2c (array_of_requests[i]);
    }
    *__ierr = MPI_Waitall ((int)*count, lrequest, c_status);
    /* By checking for lrequest[i] = 0, we handle persistent requests */
    for (i = 0; i < (int)*count; i++) {
      array_of_requests[i] = MPI_Request_c2f (lrequest[i]);
    }
  } else {
    *__ierr = MPI_Waitall ((int)*count, (MPI_Request *)0, c_status);
  }

  /* Translate statuses back to Fortran only after a successful waitall. */
  if (*__ierr == MPI_SUCCESS) {
    for (i = 0; i < (int)*count; i++)
      MPI_Status_c2f (&(c_status[i]), &(array_of_statuses[i][0]));
  }
}
/* Fortran binding for MPI_Waitall (MPICH style).
 *
 * Uses stack arrays for up to MPIR_USE_LOCAL_ARRAY requests and falls back
 * to heap allocation for larger counts.  NOTE(review): MPIR_FALLOC appears
 * to raise MPI_ERR_EXHAUSTED and bail out when MALLOC fails — confirm
 * against its definition.  Statuses are copied back to Fortran only when
 * the waitall succeeded. */
FORTRAN_API void FORT_CALL mpi_waitall_(MPI_Fint *count, MPI_Fint array_of_requests[],
                                        MPI_Fint array_of_statuses[][MPI_STATUS_SIZE],
                                        MPI_Fint *__ierr )
{
    int i;
    MPI_Request *lrequest = 0;
    MPI_Request local_lrequest[MPIR_USE_LOCAL_ARRAY];
    MPI_Status *c_status = 0;
    MPI_Status local_c_status[MPIR_USE_LOCAL_ARRAY];

    if ((int)*count > 0) {
        if ((int)*count > MPIR_USE_LOCAL_ARRAY) {
            /* Count exceeds the stack arrays: allocate from the heap. */
            MPIR_FALLOC(lrequest,(MPI_Request*)MALLOC(sizeof(MPI_Request) * (int)*count),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_WAITALL" );
            MPIR_FALLOC(c_status,(MPI_Status*)MALLOC(sizeof(MPI_Status) * (int)*count),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_WAITALL" );
        }
        else {
            lrequest = local_lrequest;
            c_status = local_c_status;
        }

        /* Convert the Fortran handles to C requests. */
        for (i=0; i<(int)*count; i++) {
            lrequest[i] = MPI_Request_f2c( array_of_requests[i] );
        }
        *__ierr = MPI_Waitall((int)*count,lrequest,c_status);
        /* By checking for lrequest[i] = 0, we handle persistent requests */
        for (i=0; i<(int)*count; i++) {
            array_of_requests[i] = MPI_Request_c2f( lrequest[i] );
        }
    }
    else
        /* Zero requests: c_status is still 0 here; the zero-count call
           does not touch the status array. */
        *__ierr = MPI_Waitall((int)*count,(MPI_Request *)0, c_status );

    /* Translate statuses back to Fortran only on success. */
    if (*__ierr == MPI_SUCCESS)
        for (i=0; i<(int)*count; i++)
            MPI_Status_c2f(&(c_status[i]), &(array_of_statuses[i][0]) );

    /* Release the heap copies when they were used. */
    if ((int)*count > MPIR_USE_LOCAL_ARRAY) {
        FREE( lrequest );
        FREE( c_status );
    }
}
/* Shim for the Fortran mpi_isend entry point: captures C-side copies of
   the datatype, communicator, and request handles, invokes the pre-hook,
   forwards to the real routine via the shim macro, then invokes the
   post-hook with the resulting error code. */
void mpi_isend(void *buf, int *count, MPI_Fint *datatype, int *dest, int *tag,
               MPI_Fint *comm, MPI_Fint *request, int *ierr)
{
    __SHIM__REGISTER_F(isend);
    caller = __SHIM__get_caller();

    MPI_Datatype cb_datatype = MPI_Type_f2c(*datatype);
    MPI_Comm cb_comm = MPI_Comm_f2c(*comm);
    MPI_Request cb_request = MPI_Request_f2c(*request);

    tor_MPI_Isend_pre(buf, *count, cb_datatype, *dest, *tag, cb_comm,
                      &cb_request, caller);
    __SHIM__FUNC_F(buf, count, datatype, dest, tag, comm, request, ierr);
    tor_MPI_Isend_pos(buf, *count, cb_datatype, *dest, *tag, cb_comm,
                      &cb_request, caller, *ierr);
}
/* Fortran binding for MPI_Request_free: free the C request and store the
   (possibly nulled) handle back into the Fortran argument. */
EXPORT_MPI_API void FORTRAN_API mpi_request_free_( MPI_Fint *request, MPI_Fint *__ierr )
{
    MPI_Request c_request = MPI_Request_f2c(*request);

    *__ierr = MPI_Request_free( &c_request );
#ifdef OLD_POINTER
    /* Remove the pointer from the mapping once the reference count reaches
       zero, which MPI_Request_free signals by setting the request to NULL. */
    if (!c_request) {
        MPIR_RmPointer( *(int*)request );
        *(int*)request = 0;
    }
#endif
    *request = MPI_Request_c2f(c_request);
}
/* Fortran wrapper for MPI_Testany.
 *
 * Converts the Fortran request handles to C, tests for completion of any
 * one of them, then converts the index (to 1-origin), flag, and status
 * back to Fortran form.  On a completed request, the corresponding Fortran
 * handle is refreshed from the request's f2c index. */
void mpi_testany_f(MPI_Fint *count, MPI_Fint *array_of_requests,
                   MPI_Fint *index, ompi_fortran_logical_t *flag,
                   MPI_Fint *status, MPI_Fint *ierr)
{
    MPI_Request *c_req;
    MPI_Status c_status;
    int i;
    OMPI_LOGICAL_NAME_DECL(flag);
    OMPI_SINGLE_NAME_DECL(index);

    /* NOTE(review): malloc(0) may return NULL when *count == 0, which
       would raise a spurious MPI_ERR_NO_MEM — confirm intended behavior. */
    c_req = (MPI_Request *) malloc(OMPI_FINT_2_INT(*count) * sizeof(MPI_Request));
    if (c_req == NULL) {
        *ierr = OMPI_INT_2_FINT(OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD,
                                                       MPI_ERR_NO_MEM,
                                                       FUNC_NAME));
        return;
    }

    for (i = 0; i < OMPI_FINT_2_INT(*count); ++i) {
        c_req[i] = MPI_Request_f2c(array_of_requests[i]);
    }

    *ierr = OMPI_INT_2_FINT(MPI_Testany(OMPI_FINT_2_INT(*count), c_req,
                                        OMPI_SINGLE_NAME_CONVERT(index),
                                        OMPI_LOGICAL_SINGLE_NAME_CONVERT(flag),
                                        &c_status));
    OMPI_SINGLE_INT_2_LOGICAL(flag);

    if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr)) {
        /* Increment index by one for fortran conventions */
        OMPI_SINGLE_INT_2_FINT(index);
        if (*flag && MPI_UNDEFINED != *(OMPI_SINGLE_NAME_CONVERT(index))) {
            /* NOTE(review): *index here is still the 0-based C value; it is
               used to refresh the completed request's Fortran handle and is
               only bumped to 1-origin afterwards.  The OMPI_INT_2_FINT
               wrapping of an index (rather than an error code) looks odd —
               verify against the OMPI single-name-convert macros. */
            array_of_requests[OMPI_INT_2_FINT(*index)] =
                c_req[OMPI_INT_2_FINT(*index)]->req_f_to_c_index;
            ++(*index);
        }
        if (!OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
            MPI_Status_c2f(&c_status, status);
        }
    }
    free(c_req);
}
/*
 * Fortran wrapper for MPI_Request_get_status.
 *
 * Bug fix: previously a Fortran STATUS_IGNORE caused the wrapper to skip
 * the call entirely and unconditionally report flag = .false., so callers
 * passing MPI_STATUS_IGNORE could never observe completion.  The C routine
 * is now invoked with MPI_STATUS_IGNORE in that case instead.
 */
void mpi_request_get_status_f(MPI_Fint *request, ompi_fortran_logical_t *flag,
                              MPI_Fint *status, MPI_Fint *ierr)
{
    MPI_Status c_status;
    MPI_Request c_req = MPI_Request_f2c( *request );
    OMPI_LOGICAL_NAME_DECL(flag);

    if (OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
        /* Caller does not want the status — still perform the query. */
        *ierr = OMPI_INT_2_FINT(MPI_Request_get_status(c_req,
                                    OMPI_LOGICAL_SINGLE_NAME_CONVERT(flag),
                                    MPI_STATUS_IGNORE));
        OMPI_SINGLE_INT_2_LOGICAL(flag);
    } else {
        *ierr = OMPI_INT_2_FINT(MPI_Request_get_status(c_req,
                                    OMPI_LOGICAL_SINGLE_NAME_CONVERT(flag),
                                    &c_status));
        OMPI_SINGLE_INT_2_LOGICAL(flag);
        MPI_Status_c2f( &c_status, status );
    }
}
/* Fortran wrapper for MPI_Test.  On success the Fortran request handle is
   refreshed from the request's f2c index, and the status is translated
   unless the caller passed Fortran STATUS_IGNORE. */
void mpi_test_f(MPI_Fint *request, MPI_Flogical *flag, MPI_Fint *status, MPI_Fint *ierr)
{
    MPI_Status c_status;
    MPI_Request c_req = MPI_Request_f2c(*request);
    OMPI_LOGICAL_NAME_DECL(flag);

    *ierr = OMPI_INT_2_FINT(MPI_Test(&c_req,
                                     OMPI_LOGICAL_SINGLE_NAME_CONVERT(flag),
                                     &c_status));
    OMPI_SINGLE_INT_2_LOGICAL(flag);

    if (OMPI_FINT_2_INT(*ierr) == MPI_SUCCESS) {
        *request = OMPI_INT_2_FINT(c_req->req_f_to_c_index);
        if (!OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
            MPI_Status_c2f(&c_status, status);
        }
    }
}
/*
 * Fortran wrapper for MPI_Test: test the request, write the (possibly
 * changed) handle back, expose the completion flag, and translate the
 * status when complete.
 *
 * Bug fix: l_flag is now initialized.  If MPI_Test fails it may leave
 * l_flag unset, and the subsequent "if (l_flag)" read indeterminate
 * memory (undefined behavior).
 */
void mpi_test (MPI_Fint *request, MPI_Fint *flag, MPI_Fint *status, MPI_Fint *__ierr )
{
  int l_flag = 0;
  MPI_Status c_status;
  MPI_Request lrequest = MPI_Request_f2c (*request);

  *__ierr = MPI_Test (&lrequest, &l_flag, &c_status);
  *request = MPI_Request_c2f (lrequest);   /* In case request is changed */
  /*
  ** The following setting ASSUMES that the C value for l_flag (0=false, non-zero=true)
  ** maps properly to a Fortran logical. Have tested gfortran, Cray, Intel, PGI,
  ** Pathscale and found this to be valid in all cases.
  */
  *flag = (MPI_Fint) l_flag;
  if (l_flag) {
    MPI_Status_c2f (&c_status, status);
  }
}
/*
 * Fortran wrapper for MPI_Startall: convert the Fortran handles, start all
 * persistent requests, and write the handles back.
 *
 * Consistency fixes (to match the other OMPI-style wrappers in this file):
 * the count is converted with OMPI_FINT_2_INT before being used for the
 * allocation size and loop bounds (it was used raw as an MPI_Fint), and
 * the error-handler result is converted to Fortran with OMPI_INT_2_FINT.
 */
void mpi_startall_f(MPI_Fint *count, MPI_Fint *array_of_requests, MPI_Fint *ierr)
{
    MPI_Request *c_req;
    int i;
    int c_count = OMPI_FINT_2_INT(*count);

    c_req = malloc(c_count * sizeof(MPI_Request));
    if (NULL == c_req) {
        *ierr = OMPI_INT_2_FINT(OMPI_ERRHANDLER_INVOKE(MPI_COMM_WORLD,
                                                       MPI_ERR_NO_MEM,
                                                       FUNC_NAME));
        return;
    }

    for (i = 0; i < c_count; i++) {
        c_req[i] = MPI_Request_f2c(array_of_requests[i]);
    }

    *ierr = OMPI_INT_2_FINT(MPI_Startall(c_count, c_req));

    /* Started persistent requests keep valid handles; refresh them all. */
    for (i = 0; i < c_count; i++) {
        array_of_requests[i] = MPI_Request_c2f(c_req[i]);
    }
    free(c_req);
}
/*
 * Fortran binding for MPI_Waitsome (MPICH style).
 *
 * Converts the Fortran handles to C, waits for at least one completion,
 * writes the updated handles back (completed ones via their completion
 * index, uncompleted ones directly), and on success translates the
 * completed statuses and converts the indices to 1-origin Fortran form.
 *
 * Bug fix: on an error return from MPI_Waitsome the function previously
 * returned immediately, leaking the three heap arrays whenever incount
 * exceeded MPIR_USE_LOCAL_ARRAY.  The error path now skips only the
 * status/index translation and falls through to the common cleanup.
 */
FORTRAN_API void FORT_CALL mpi_waitsome_( MPI_Fint *incount, MPI_Fint array_of_requests[],
                                          MPI_Fint *outcount, MPI_Fint array_of_indices[],
                                          MPI_Fint array_of_statuses[][MPI_STATUS_SIZE],
                                          MPI_Fint *__ierr )
{
    int i, j, found;
    int loutcount;
    int *l_indices = 0;
    int local_l_indices[MPIR_USE_LOCAL_ARRAY];
    MPI_Request *lrequest = 0;
    MPI_Request local_lrequest[MPIR_USE_LOCAL_ARRAY];
    MPI_Status *c_status = 0;
    MPI_Status local_c_status[MPIR_USE_LOCAL_ARRAY];

    if ((int)*incount > 0) {
        if ((int)*incount > MPIR_USE_LOCAL_ARRAY) {
            /* Too many requests for the stack arrays: allocate them. */
            MPIR_FALLOC(lrequest,(MPI_Request*)MALLOC(sizeof(MPI_Request)* (int)*incount),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_WAITSOME" );
            MPIR_FALLOC(l_indices,(int*)MALLOC(sizeof(int)* (int)*incount),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_WAITSOME" );
            MPIR_FALLOC(c_status,(MPI_Status*)MALLOC(sizeof(MPI_Status)* (int)*incount),
                        MPIR_COMM_WORLD, MPI_ERR_EXHAUSTED, "MPI_WAITSOME" );
        }
        else {
            lrequest = local_lrequest;
            l_indices = local_l_indices;
            c_status = local_c_status;
        }

        for (i=0; i<(int)*incount; i++)
            lrequest[i] = MPI_Request_f2c( array_of_requests[i] );

        *__ierr = MPI_Waitsome((int)*incount, lrequest, &loutcount,
                               l_indices, c_status);

        /* By checking for lrequest[l_indices[i]] = 0, we handle persistent
           requests */
        for (i=0; i<(int)*incount; i++) {
            if ( i < loutcount) {
                if (l_indices[i] >= 0) {
                    array_of_requests[l_indices[i]] =
                        MPI_Request_c2f( lrequest[l_indices[i]] );
                }
            }
            else {
                /* Slot i holds no completion record; copy its handle back
                   unless request i itself completed this call. */
                found = 0;
                j = 0;
                while ( (!found) && (j<loutcount) ) {
                    if (l_indices[j++] == i) found = 1;
                }
                if (!found)
                    array_of_requests[i] = MPI_Request_c2f( lrequest[i] );
            }
        }
    }
    else
        *__ierr = MPI_Waitsome( (int)*incount, (MPI_Request *)0, &loutcount,
                                l_indices, c_status );

    if (*__ierr == MPI_SUCCESS) {
        /* Translate completed statuses and convert indices to 1-origin. */
        for (i=0; i<loutcount; i++) {
            MPI_Status_c2f( &c_status[i], &(array_of_statuses[i][0]) );
            if (l_indices[i] >= 0)
                array_of_indices[i] = l_indices[i] + 1;
        }
        *outcount = (MPI_Fint)loutcount;
    }

    /* Common cleanup: release the heap arrays if they were allocated. */
    if ((int)*incount > MPIR_USE_LOCAL_ARRAY) {
        FREE( l_indices );
        FREE( lrequest );
        FREE( c_status );
    }
}
/* Fortran binding for MPI_Start: start the persistent request and write
   the handle back to the Fortran argument. */
EXPORT_MPI_API void FORTRAN_API mpi_start_( MPI_Fint *request, MPI_Fint *__ierr )
{
    MPI_Request c_request = MPI_Request_f2c(*request);

    *__ierr = MPI_Start( &c_request );
    *request = MPI_Request_c2f(c_request);
}
/* Fortran wrapper for MPI_Grequest_complete.  The Fortran handle is not
   written back — this routine only marks the generalized request complete. */
void mpi_grequest_complete_f(MPI_Fint *request, MPI_Fint *ierr)
{
    MPI_Request c_request = MPI_Request_f2c(*request);

    *ierr = OMPI_INT_2_FINT(MPI_Grequest_complete(c_request));
}