void Del_Errhand(struct comm_info* c_info)
{
#ifdef SET_ERRH
    MPI_Errhandler_free(&c_info->ERR);
#ifdef EXT
    MPI_Errhandler_free(&c_info->ERRW);
#endif
#ifdef MPIIO
    MPI_Errhandler_free(&c_info->ERRF);
#endif
#endif
}
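For context, a minimal sketch of the counterpart that would create and attach these handlers before Del_Errhand frees them. Only the fields ERR, ERRW, and ERRF and the SET_ERRH/EXT/MPIIO macros come from the source; the function name Set_Errhand and the callbacks ErrHand, ErrHandWin, and ErrHandFile are hypothetical:

#ifdef SET_ERRH
/* Hypothetical installer: create one error handler per object class;
   Del_Errhand above is the matching cleanup. */
void Set_Errhand(struct comm_info* c_info, MPI_Comm comm)
{
    MPI_Comm_create_errhandler(ErrHand, &c_info->ERR);       /* ErrHand: hypothetical callback */
    MPI_Comm_set_errhandler(comm, c_info->ERR);
#ifdef EXT
    MPI_Win_create_errhandler(ErrHandWin, &c_info->ERRW);    /* hypothetical callback */
#endif
#ifdef MPIIO
    MPI_File_create_errhandler(ErrHandFile, &c_info->ERRF);  /* hypothetical callback */
#endif
}
#endif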
int main(int argc, char *argv[])
{
    int buf[2];
    MPI_Win win;
    MPI_Errhandler newerr;
    int i;

    MTest_Init(&argc, &argv);

    /* Run this test multiple times to expose storage leaks (we found a
       leak of error handlers with this test) */
    for (i = 0; i < 1000; i++) {
        calls = 0;
        MPI_Win_create(buf, 2 * sizeof(int), sizeof(int),
                       MPI_INFO_NULL, MPI_COMM_WORLD, &win);
        mywin = win;

        MPI_Win_create_errhandler(eh, &newerr);
        MPI_Win_set_errhandler(win, newerr);
        MPI_Win_call_errhandler(win, MPI_ERR_OTHER);
        MPI_Errhandler_free(&newerr);
        if (calls != 1) {
            errs++;
            printf("Error handler not called\n");
        }
        MPI_Win_free(&win);
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
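The globals calls, errs, and mywin and the window error handler eh are defined outside this snippet. A minimal sketch consistent with the MPI_Win_errhandler_function signature (the actual test's definitions may differ):

static int calls = 0;
static int errs = 0;
static MPI_Win mywin;

/* Count invocations and check that the handler fires on the window
   it was attached to. */
static void eh(MPI_Win *win, int *err, ...)
{
    if (*win != mywin) {
        errs++;
        printf("eh called on the wrong window\n");
    }
    calls++;
}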
int numProcsFails(MPI_Comm mcw)
{
    int rank, ret, numFailures = 0, flag;
    MPI_Group fGroup;
    MPI_Errhandler newEh;
    MPI_Comm dupComm;

    // Error handler
    MPI_Comm_create_errhandler(mpiErrorHandler, &newEh);

    MPI_Comm_rank(mcw, &rank);

    // Set error handler for communicator
    MPI_Comm_set_errhandler(mcw, newEh);

    // Target function
    if (MPI_SUCCESS != (ret = MPI_Comm_dup(mcw, &dupComm))) {
    //if (MPI_SUCCESS != (ret = MPI_Barrier(mcw))) { // MPI_Comm_dup or MPI_Barrier
        OMPI_Comm_failure_ack(mcw);
        OMPI_Comm_failure_get_acked(mcw, &fGroup);

        // Get the number of failures
        MPI_Group_size(fGroup, &numFailures);
    } // end of "MPI_Comm_dup failure"

    OMPI_Comm_agree(mcw, &flag);

    // Memory release
    if (numFailures > 0)
        MPI_Group_free(&fGroup);
    MPI_Errhandler_free(&newEh);

    return numFailures;
} // numProcsFails()
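The communicator error handler mpiErrorHandler used here (and in communicatorReconstruct below) is not shown. A minimal sketch for a ULFM-style handler that only reports the failure, assuming the standard MPI_Comm_errhandler_function signature (the source's handler may do more):

static void mpiErrorHandler(MPI_Comm *comm, int *errCode, ...)
{
    int errClass, len;
    char errString[MPI_MAX_ERROR_STRING];

    MPI_Error_class(*errCode, &errClass);
    MPI_Error_string(*errCode, errString, &len);
    printf("Error handler invoked: class %d (%s)\n", errClass, errString);
    /* With ULFM, recovery (revoke/shrink/agree) is driven by the caller;
       the handler itself just records and reports the failure. */
}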
int main(int argc, char *argv[])
{
    int rank, errs = 0, rc;
    MPI_Errhandler ioerr_handler;
    MPI_Status status;
    MPI_File fh;
    char inbuf[80];

    MTest_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Create a file to which to attach the handler */
    rc = MPI_File_open(MPI_COMM_WORLD, (char *)"test.txt",
                       MPI_MODE_CREATE | MPI_MODE_WRONLY | MPI_MODE_DELETE_ON_CLOSE,
                       MPI_INFO_NULL, &fh);
    if (rc) {
        errs++;
        printf("Unable to open test.txt for writing\n");
    }

    rc = MPI_File_create_errhandler(user_handler, &ioerr_handler);
    if (rc) {
        errs++;
        printf("MPI_File_create_errhandler returned an error code: %d\n", rc);
    }

    rc = MPI_File_set_errhandler(fh, ioerr_handler);
    if (rc) {
        errs++;
        printf("MPI_File_set_errhandler returned an error code: %d\n", rc);
    }

    /* Avoid leaking the errhandler; freeing it here is safe because error
       handlers have reference-count semantics */
    rc = MPI_Errhandler_free(&ioerr_handler);
    if (rc) {
        errs++;
        printf("MPI_Errhandler_free returned an error code: %d\n", rc);
    }

    /* This should generate an error because the file mode is WRONLY */
    rc = MPI_File_read_at(fh, 0, inbuf, 80, MPI_BYTE, &status);
    if (handlerCalled != 1) {
        errs++;
        printf("User-defined error handler was not called\n");
    }

    rc = MPI_File_close(&fh);
    if (rc) {
        errs++;
        printf("MPI_File_close returned an error code: %d\n", rc);
    }

    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
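The handler user_handler and the flag handlerCalled are defined elsewhere in the test. A minimal sketch matching the MPI_File_errhandler_function signature (the original's definition may check more, e.g. the specific error class):

static int handlerCalled = 0;

/* Hypothetical file error handler: record that it ran when any error
   code other than MPI_SUCCESS is delivered. */
static void user_handler(MPI_File *fh, int *errcode, ...)
{
    if (*errcode != MPI_SUCCESS)
        handlerCalled++;
}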
int main(int argc, char **argv)
{
    MPI_Errhandler errh;
    int wrank;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &wrank);
    MPI_Comm_create_errhandler((MPI_Comm_errhandler_function *)errf, &errh);
    MPI_Comm_set_errhandler(MPI_COMM_WORLD, errh);
    MPI_Comm_set_errhandler(MPI_COMM_SELF, errh);
    MPI_Errhandler_free(&errh);
    MPI_Finalize();

    /* Test harness requirement is that only one process write "No Errors" */
    if (wrank == 0)
        printf(" No Errors\n");
    return 0;
}
int main(int argc, char *argv[])
{
    int rank, size, i;
    int sum = 0, val = 1;
    int errs = 0;
    MPI_Errhandler errhandler;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);

    if (size < 4) {
        fprintf(stderr, "Must run with at least 4 processes.\n");
        MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_Comm_dup(MPI_COMM_WORLD, &comm_all);
    MPI_Comm_create_errhandler(&error_handler, &errhandler);
    MPI_Comm_set_errhandler(comm_all, errhandler);

    for (i = 0; i < 10; ++i) {
        MPI_Comm_size(comm_all, &size);
        sum = 0;
        if (i == 5 && rank == 1) {
            /* Simulate a process failure in iteration 5 */
            exit(1);
        } else if (i != 5) {
            MPI_Allreduce(&val, &sum, 1, MPI_INT, MPI_SUM, comm_all);
            if (sum != size && rank == 0) {
                errs++;
                fprintf(stderr, "Incorrect answer: %d != %d\n", sum, size);
            }
        }
    }

    if (0 == rank && errs) {
        fprintf(stdout, " Found %d errors\n", errs);
    } else if (0 == rank) {
        fprintf(stdout, " No errors\n");
    }

    MPI_Comm_free(&comm_all);
    MPI_Errhandler_free(&errhandler);
    MPI_Finalize();
    return 0;
}
int main(int argc, char *argv[])
{
    int err;
    int buf[2];
    MPI_Win win;
    MPI_Comm comm;
    MPI_Errhandler newerr, olderr;

    MTEST_VG_MEM_INIT(buf, 2 * sizeof(int));
    MTest_Init(&argc, &argv);
    comm = MPI_COMM_WORLD;

    MPI_Win_create_errhandler(weh, &newerr);
    MPI_Win_create(buf, 2 * sizeof(int), sizeof(int), MPI_INFO_NULL, comm, &win);
    mywin = win;

    MPI_Win_get_errhandler(win, &olderr);
    if (olderr != MPI_ERRORS_ARE_FATAL) {
        errs++;
        printf("Expected errors are fatal\n");
    }

    MPI_Win_set_errhandler(win, newerr);

    expected_err_class = MPI_ERR_RANK;
    err = MPI_Put(buf, 1, MPI_INT, -5, 0, 1, MPI_INT, win);
    if (calls != 1) {
        errs++;
        printf("newerr not called\n");
        calls = 1;
    }

    expected_err_class = MPI_ERR_OTHER;
    MPI_Win_call_errhandler(win, MPI_ERR_OTHER);
    if (calls != 2) {
        errs++;
        printf("newerr not called (2)\n");
    }

    MPI_Win_free(&win);
    MPI_Errhandler_free(&newerr);
    MTest_Finalize(errs);
    return MTestReturnValue(errs);
}
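Here weh, calls, errs, expected_err_class, and mywin live outside the snippet. A plausible sketch of weh, based on how the test drives expected_err_class before each provoked error (the original's definition may differ):

static int calls = 0;
static int errs = 0;
static int expected_err_class = MPI_SUCCESS;
static MPI_Win mywin;

/* Hypothetical handler: classify the incoming error code and compare
   it with the class the test expects MPI to raise. */
static void weh(MPI_Win *win, int *err, ...)
{
    int errclass;

    MPI_Error_class(*err, &errclass);
    if (errclass != expected_err_class) {
        errs++;
        printf("Unexpected error class %d (expected %d)\n",
               errclass, expected_err_class);
    }
    calls++;
}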
mpi_error_handler::~mpi_error_handler()
{
    MPI_Errhandler_free(&handler_handle_);
}
int main(int argc, char *argv[])
{
    int err;
    int buf[2];
    MPI_Win win;
    MPI_Comm comm;
    MPI_Errhandler newerr1, newerr2, olderr;

    MTest_Init(&argc, &argv);
    comm = MPI_COMM_WORLD;

    MPI_Win_create_errhandler(weh1, &newerr1);
    MPI_Win_create_errhandler(weh2, &newerr2);
    MPI_Win_create(buf, 2 * sizeof(int), sizeof(int), MPI_INFO_NULL, comm, &win);
    mywin = win;

    MPI_Win_get_errhandler(win, &olderr);
    if (olderr != MPI_ERRORS_ARE_FATAL) {
        errs++;
        printf("Expected errors are fatal\n");
    }

    MPI_Win_set_errhandler(win, newerr1);
    /* We can free the error handler now, even though the window is still
       using it: error handlers have reference-count semantics */
    MPI_Errhandler_free(&newerr1);

    expected_err_class = MPI_ERR_RANK;
    err = MPI_Put(buf, 1, MPI_INT, -5, 0, 1, MPI_INT, win);
    if (w1Called != 1) {
        errs++;
        printf("newerr1 not called\n");
        w1Called = 1;
    }

    expected_err_class = MPI_ERR_OTHER;
    MPI_Win_call_errhandler(win, MPI_ERR_OTHER);
    if (w1Called != 2) {
        errs++;
        printf("newerr1 not called (2)\n");
    }
    if (w1Called != 2 || w2Called != 0) {
        errs++;
        printf("Error handler weh1 not called the expected number of times\n");
    }

    /* Try another error handler.  Replacing the handler should allow the
       MPI implementation to free the first one. */
    MPI_Win_set_errhandler(win, newerr2);
    MPI_Errhandler_free(&newerr2);

    expected_err_class = MPI_ERR_RANK;
    err = MPI_Put(buf, 1, MPI_INT, -5, 0, 1, MPI_INT, win);
    if (w2Called != 1) {
        errs++;
        printf("newerr2 not called\n");
        w2Called = 1;
    }

    expected_err_class = MPI_ERR_OTHER;
    MPI_Win_call_errhandler(win, MPI_ERR_OTHER);
    if (w2Called != 2) {
        errs++;
        printf("newerr2 not called (2)\n");
    }
    if (w1Called != 2 || w2Called != 2) {
        errs++;
        printf("Error handlers weh1 and weh2 not called the expected number of times\n");
    }

    MPI_Win_free(&win);
    MTest_Finalize(errs);
    MPI_Finalize();
    return 0;
}
MPI_Comm communicatorReconstruct(MPI_Comm myCommWorld, int childFlag, int *listFails,
                                 int *numFails, int *numNodeFails,
                                 int sumPrevNumNodeFails, int argc, char **argv,
                                 int verbosity)
{
    int i, ret, rank, nprocs, oldRank = 0, totFails = 0, *failedList, flag;
    int iterCounter = 0, failure = 0, recvVal[2], length;
    MPI_Status mpiStatus;
    MPI_Comm parent, mcw;
    MPI_Comm dupComm, tempIntracomm, unorderIntracomm;
    MPI_Errhandler newEh;
    double startTime = 0.0, endTime;
    char hostName[MPI_MAX_PROCESSOR_NAME];

    // Error handler
    MPI_Comm_create_errhandler(mpiErrorHandler, &newEh);

    MPI_Comm_get_parent(&parent);
    MPI_Comm_rank(myCommWorld, &rank);

    if (MPI_COMM_NULL == parent && childFlag == 0 && rank == 0)
        startTime = MPI_Wtime();

    do {
        failure = 0;
        ret = MPI_SUCCESS;

        /*
        if (childFlag == 0 && MPI_COMM_NULL != parent) {
            parent = MPI_COMM_NULL;
        }
        */

        // Parent part
        if (MPI_COMM_NULL == parent) {
            if (iterCounter == 0)
                mcw = myCommWorld;

            // Set error handler for communicator
            MPI_Comm_set_errhandler(mcw, newEh);

            // World information
            MPI_Comm_rank(mcw, &rank);
            MPI_Comm_size(mcw, &nprocs);

            // Synchronize. Sometimes hangs without this
#ifdef HANG_ON_REMOVE
            //MPI_Barrier(mcw);
            OMPI_Comm_agree(mcw, &flag); // since MPI_Barrier sometimes hangs
#endif

            // Target function
            //if (MPI_SUCCESS != (ret = MPI_Barrier(mcw))) {
            if (MPI_SUCCESS != (ret = MPI_Comm_dup(mcw, &dupComm))) {
                if (verbosity > 0 && rank == 0)
                    printf("[????? Process %d (nprocs %d)] MPI_Comm_dup (parent): "
                           "Unsuccessful (due to process failure) OK\n", rank, nprocs);

                // Revoke the communicator
                if (MPI_SUCCESS != (OMPI_Comm_revoke(mcw))) {
                    if (rank == 0)
                        printf("[Process %d (nprocs %d)] Iteration %d: OMPI_Comm_revoke "
                               "(parent): Error!\n", rank, nprocs, iterCounter);
                } else {
                    if (verbosity > 1 && rank == 0)
                        printf("[Process %d (nprocs %d)] Iteration %d: OMPI_Comm_revoke "
                               "(parent): SUCCESS\n", rank, nprocs, iterCounter);
                }

                // Call repair with the split world
                totFails = numProcsFails(mcw);
                failedList = (int *) malloc(totFails * sizeof(int));
                repairComm(&mcw, &tempIntracomm, iterCounter, failedList, numFails,
                           numNodeFails, sumPrevNumNodeFails, argc, argv, verbosity);

                // Assign list of failed processes
                #pragma omp parallel for default(shared)
                for (i = 0; i < *numFails; i++)
                    listFails[i] = failedList[i];

                // Free memory
                free(failedList);

                // Operation failed: retry
                failure = 1;
            } // end of "if MPI_Barrier/MPI_Comm_dup fails"
            else {
                if (verbosity > 0 && rank == 0)
                    printf("[..... Process %d (nprocs %d)] Iteration %d: MPI_Comm_dup "
                           "(parent): SUCCESS\n", rank, nprocs, iterCounter);

                // Operation succeeded: stop iterating
                failure = 0;
            }
        } // end of "parent"
        // Child part
        else {
            MPI_Comm_set_errhandler(parent, newEh);

            // Synchronize. Sometimes hangs without this.
            // The position of this code matters, and it must use the
            // intercommunicator parent (not an intracommunicator)
#ifdef HANG_ON_REMOVE
            //MPI_Barrier(parent);
            OMPI_Comm_agree(parent, &flag); // since MPI_Barrier sometimes hangs
#endif

            MPI_Comm_rank(parent, &rank);
            MPI_Comm_size(parent, &nprocs);

            if (verbosity > 0 && rank == 0) {
                MPI_Get_processor_name(hostName, &length);
                printf("[Process %d, nprocs = %d] created on host %s (child)\n",
                       rank, nprocs, hostName);
            }

            if (MPI_SUCCESS != (MPI_Intercomm_merge(parent, true, &unorderIntracomm))) {
                if (rank == 0)
                    printf("[Process %d] Iteration %d: MPI_Intercomm_merge (child): Error!\n",
                           rank, iterCounter);
            } else {
                if (verbosity > 1 && rank == 0)
                    printf("[Process %d] Iteration %d: MPI_Intercomm_merge (child): SUCCESS\n",
                           rank, iterCounter);
            }

            // Receive failed ranks and number of failures from process 0 of parent
            if (MPI_SUCCESS != (MPI_Recv(&recvVal, 2, MPI_INT, 0, MERGE_TAG,
                                         unorderIntracomm, &mpiStatus))) {
                if (rank == 0)
                    printf("[Process %d] Iteration %d: MPI_Recv1 (child): Error!\n",
                           rank, iterCounter);
            } else {
                if (verbosity > 1 && rank == 0)
                    printf("[Process %d] Iteration %d: MPI_Recv1 (child): SUCCESS\n",
                           rank, iterCounter);
                oldRank = recvVal[0];
                *numFails = recvVal[1];
            }

            // Split the communicator to order the ranks.
            // No ordering is maintained here; the actual ordering is done on
            // the parent side. This is only a support for the parent side.
            if (MPI_SUCCESS != (MPI_Comm_split(unorderIntracomm, 0, oldRank,
                                               &tempIntracomm))) {
                if (rank == 0)
                    printf("[Process %d] Iteration %d: MPI_Comm_split (child): Error!\n",
                           rank, iterCounter);
            } else {
                if (verbosity > 1 && rank == 0)
                    printf("[Process %d] Iteration %d: MPI_Comm_split (child): SUCCESS\n",
                           rank, iterCounter);
            }

            // Operation on parent failed: retry
            ret = (!MPI_SUCCESS);
            failure = 1;

            // Free memory
            MPI_Comm_free(&unorderIntracomm);
            MPI_Comm_free(&parent);
        } // end of "child"

        // Reset comm world
        if (ret != MPI_SUCCESS)
            mcw = tempIntracomm;

        // Reset parent value for parent
        if (parent == MPI_COMM_NULL && ret != MPI_SUCCESS)
            parent = mcw;

        // Reset parent value of child and make the operation collective
        if (MPI_SUCCESS != ret && MPI_COMM_NULL != parent)
            parent = MPI_COMM_NULL;

        iterCounter++;
    } while (failure > 1); // replace 'failure > 1' with 'failure' for fault-tolerant recovery

    if (MPI_COMM_NULL == parent && childFlag == 0 && rank == 0) {
        endTime = MPI_Wtime();
        printf("[%d]----- Reconstructing failed communicator (including failed list creation) "
               "takes %0.6f Sec (MPI_Wtime) -----\n", rank, endTime - startTime);
    }

    // Memory release
    MPI_Errhandler_free(&newEh);

    return mcw;
} // communicatorReconstruct()
void declareBindings(void)
{
    /* === Point-to-point === */
    void* buf;
    int count;
    MPI_Datatype datatype;
    int dest;
    int tag;
    MPI_Comm comm;
    MPI_Send(buf, count, datatype, dest, tag, comm); // L12
    int source;
    MPI_Status status;
    MPI_Recv(buf, count, datatype, source, tag, comm, &status); // L15
    MPI_Get_count(&status, datatype, &count);
    MPI_Bsend(buf, count, datatype, dest, tag, comm);
    MPI_Ssend(buf, count, datatype, dest, tag, comm);
    MPI_Rsend(buf, count, datatype, dest, tag, comm);
    void* buffer;
    int size;
    MPI_Buffer_attach(buffer, size); // L22
    MPI_Buffer_detach(buffer, &size);
    MPI_Request request;
    MPI_Isend(buf, count, datatype, dest, tag, comm, &request); // L25
    MPI_Ibsend(buf, count, datatype, dest, tag, comm, &request);
    MPI_Issend(buf, count, datatype, dest, tag, comm, &request);
    MPI_Irsend(buf, count, datatype, dest, tag, comm, &request);
    MPI_Irecv(buf, count, datatype, source, tag, comm, &request);
    MPI_Wait(&request, &status);
    int flag;
    MPI_Test(&request, &flag, &status); // L32
    MPI_Request_free(&request);
    MPI_Request* array_of_requests;
    int index;
    MPI_Waitany(count, array_of_requests, &index, &status); // L36
    MPI_Testany(count, array_of_requests, &index, &flag, &status);
    MPI_Status* array_of_statuses;
    MPI_Waitall(count, array_of_requests, array_of_statuses); // L39
    MPI_Testall(count, array_of_requests, &flag, array_of_statuses);
    int incount;
    int outcount;
    int* array_of_indices;
    MPI_Waitsome(incount, array_of_requests, &outcount, array_of_indices,
                 array_of_statuses); // L44--45
    MPI_Testsome(incount, array_of_requests, &outcount, array_of_indices,
                 array_of_statuses); // L46--47
    MPI_Iprobe(source, tag, comm, &flag, &status); // L48
    MPI_Probe(source, tag, comm, &status);
    MPI_Cancel(&request);
    MPI_Test_cancelled(&status, &flag);
    MPI_Send_init(buf, count, datatype, dest, tag, comm, &request);
    MPI_Bsend_init(buf, count, datatype, dest, tag, comm, &request);
    MPI_Ssend_init(buf, count, datatype, dest, tag, comm, &request);
    MPI_Rsend_init(buf, count, datatype, dest, tag, comm, &request);
    MPI_Recv_init(buf, count, datatype, source, tag, comm, &request);
    MPI_Start(&request);
    MPI_Startall(count, array_of_requests);
    void* sendbuf;
    int sendcount;
    MPI_Datatype sendtype;
    int sendtag;
    void* recvbuf;
    int recvcount;
    MPI_Datatype recvtype;
    int recvtag;
    MPI_Sendrecv(sendbuf, sendcount, sendtype, dest, sendtag,
                 recvbuf, recvcount, recvtype, source, recvtag,
                 comm, &status); // L67--69
    MPI_Sendrecv_replace(buf, count, datatype, dest, sendtag, source, recvtag,
                         comm, &status); // L70--71
    MPI_Datatype oldtype;
    MPI_Datatype newtype;
    MPI_Type_contiguous(count, oldtype, &newtype); // L74
    int blocklength;
    {
        int stride;
        MPI_Type_vector(count, blocklength, stride, oldtype, &newtype); // L78
    }
    {
        MPI_Aint stride;
        MPI_Type_hvector(count, blocklength, stride, oldtype, &newtype); // L82
    }
    int* array_of_blocklengths;
    {
        int* array_of_displacements;
        MPI_Type_indexed(count, array_of_blocklengths, array_of_displacements,
                         oldtype, &newtype); // L87--88
    }
    {
        MPI_Aint* array_of_displacements;
        MPI_Type_hindexed(count, array_of_blocklengths, array_of_displacements,
                          oldtype, &newtype); // L92--93
        MPI_Datatype* array_of_types;
        MPI_Type_struct(count, array_of_blocklengths, array_of_displacements,
                        array_of_types, &newtype); // L95--96
    }
    void* location;
    MPI_Aint address;
    MPI_Address(location, &address); // L100
    MPI_Aint extent;
    MPI_Type_extent(datatype, &extent); // L102
    MPI_Type_size(datatype, &size);
    MPI_Aint displacement;
    MPI_Type_lb(datatype, &displacement); // L105
    MPI_Type_ub(datatype, &displacement);
    MPI_Type_commit(&datatype);
    MPI_Type_free(&datatype);
    MPI_Get_elements(&status, datatype, &count);
    void* inbuf;
    void* outbuf;
    int outsize;
    int position;
    MPI_Pack(inbuf, incount, datatype, outbuf, outsize, &position, comm); // L114
    int insize;
    MPI_Unpack(inbuf, insize, &position, outbuf, outcount, datatype,
               comm); // L116--117
    MPI_Pack_size(incount, datatype, comm, &size);

    /* === Collectives === */
    MPI_Barrier(comm); // L121
    int root;
    MPI_Bcast(buffer, count, datatype, root, comm); // L123
    MPI_Gather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
               root, comm); // L124--125
    int* recvcounts;
    int* displs;
    MPI_Gatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs,
                recvtype, root, comm); // L128--130
    MPI_Scatter(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                root, comm); // L131--132
    int* sendcounts;
    MPI_Scatterv(sendbuf, sendcounts, displs, sendtype, recvbuf, recvcount,
                 recvtype, root, comm); // L134--135
    MPI_Allgather(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                  comm); // L136--137
    MPI_Allgatherv(sendbuf, sendcount, sendtype, recvbuf, recvcounts, displs,
                   recvtype, comm); // L138--140
    MPI_Alltoall(sendbuf, sendcount, sendtype, recvbuf, recvcount, recvtype,
                 comm); // L141--142
    int* sdispls;
    int* rdispls;
    MPI_Alltoallv(sendbuf, sendcounts, sdispls, sendtype,
                  recvbuf, recvcounts, rdispls, recvtype, comm); // L145--147
    MPI_Op op;
    MPI_Reduce(sendbuf, recvbuf, count, datatype, op, root, comm); // L149
#if 0
    MPI_User_function function;
    int commute;
    MPI_Op_create(function, commute, &op); // L153
#endif
    MPI_Op_free(&op); // L155
    MPI_Allreduce(sendbuf, recvbuf, count, datatype, op, comm);
    MPI_Reduce_scatter(sendbuf, recvbuf, recvcounts, datatype, op, comm);
    MPI_Scan(sendbuf, recvbuf, count, datatype, op, comm);

    /* === Groups, contexts, and communicators === */
    MPI_Group group;
    MPI_Group_size(group, &size); // L162
    int rank;
    MPI_Group_rank(group, &rank); // L164
    MPI_Group group1;
    int n;
    int* ranks1;
    MPI_Group group2;
    int* ranks2;
    MPI_Group_translate_ranks(group1, n, ranks1, group2, ranks2); // L170
    int result;
    MPI_Group_compare(group1, group2, &result); // L172
    MPI_Group newgroup;
    MPI_Group_union(group1, group2, &newgroup); // L174
    MPI_Group_intersection(group1, group2, &newgroup);
    MPI_Group_difference(group1, group2, &newgroup);
    int* ranks;
    MPI_Group_incl(group, n, ranks, &newgroup); // L178
    MPI_Group_excl(group, n, ranks, &newgroup);
    extern int ranges[][3];
    MPI_Group_range_incl(group, n, ranges, &newgroup); // L181
    MPI_Group_range_excl(group, n, ranges, &newgroup);
    MPI_Group_free(&group);
    MPI_Comm_size(comm, &size);
    MPI_Comm_rank(comm, &rank);
    MPI_Comm comm1;
    MPI_Comm comm2;
    MPI_Comm_compare(comm1, comm2, &result);
    MPI_Comm newcomm;
    MPI_Comm_dup(comm, &newcomm);
    MPI_Comm_create(comm, group, &newcomm);
    int color;
    int key;
    MPI_Comm_split(comm, color, key, &newcomm); // L194
    MPI_Comm_free(&comm);
    MPI_Comm_test_inter(comm, &flag);
    MPI_Comm_remote_size(comm, &size);
    MPI_Comm_remote_group(comm, &group);
    MPI_Comm local_comm;
    int local_leader;
    MPI_Comm peer_comm;
    int remote_leader;
    MPI_Comm newintercomm;
    MPI_Intercomm_create(local_comm, local_leader, peer_comm, remote_leader,
                         tag, &newintercomm); // L204--205
    MPI_Comm intercomm;
    MPI_Comm newintracomm;
    int high;
    MPI_Intercomm_merge(intercomm, high, &newintracomm); // L209
    int keyval;
#if 0
    MPI_Copy_function copy_fn;
    MPI_Delete_function delete_fn;
    void* extra_state;
    MPI_Keyval_create(copy_fn, delete_fn, &keyval, extra_state); // L215
#endif
    MPI_Keyval_free(&keyval); // L217
    void* attribute_val;
    MPI_Attr_put(comm, keyval, attribute_val); // L219
    MPI_Attr_get(comm, keyval, attribute_val, &flag);
    MPI_Attr_delete(comm, keyval);

    /* === Environmental inquiry === */
    char* name;
    int resultlen;
    MPI_Get_processor_name(name, &resultlen); // L226
    MPI_Errhandler errhandler;
#if 0
    MPI_Handler_function function;
    MPI_Errhandler_create(function, &errhandler); // L230
#endif
    MPI_Errhandler_set(comm, errhandler); // L232
    MPI_Errhandler_get(comm, &errhandler);
    MPI_Errhandler_free(&errhandler);
    int errorcode;
    char* string;
    MPI_Error_string(errorcode, string, &resultlen); // L237
    int errorclass;
    MPI_Error_class(errorcode, &errorclass); // L239
    MPI_Wtime();
    MPI_Wtick();
    int argc;
    char** argv;
    MPI_Init(&argc, &argv); // L244
    MPI_Finalize();
    MPI_Initialized(&flag);
    MPI_Abort(comm, errorcode);
}
FORT_DLL_SPEC void FORT_CALL mpi_errhandler_free_(MPI_Fint *v1, MPI_Fint *ierr)
{
    *ierr = MPI_Errhandler_free((MPI_Errhandler *)(v1));
}