/*
 * test_mpio_1wMr: "one writes, Many read" MPI-IO consistency test.
 *
 * One process (rank 0) writes DIMSIZE bytes to `filename`; every process
 * then reads the same region back and verifies each byte.  The optional
 * `special_request` bits enable workarounds for non-POSIX-compliant
 * filesystems:
 *   USEATOM  - turn on MPI file atomicity before the write.
 *   USEFSYNC - call MPI_File_sync (twice, around a barrier) after the write.
 *
 * Returns the number of data verification errors (0 on success), or 1 on a
 * hard failure (hostname/open/write/read error).
 *
 * NOTE(review): DIMSIZE, MAINPROCESS, VERBOSE_*, PRINTID, USEATOM and
 * USEFSYNC are macros defined elsewhere in this test harness — presumably
 * PRINTID prints the rank prefix; confirm against the harness header.
 */
static int test_mpio_1wMr(char *filename, int special_request)
{
    char hostname[128];
    int mpi_size, mpi_rank;
    MPI_File fh;
    char mpi_err_str[MPI_MAX_ERROR_STRING];
    int mpi_err_strlen;
    int mpi_err;
    unsigned char writedata[DIMSIZE], readdata[DIMSIZE];
    unsigned char expect_val;
    int i, irank;
    int nerrs = 0;		/* number of errors */
    int atomicity;
    MPI_Offset mpi_off;
    MPI_Status mpi_stat;

    MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
    MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);

    if (MAINPROCESS && VERBOSE_MED){
	printf("Testing one process writes, all processes read.\n");
	printf("Using %d processes accessing file %s\n", mpi_size, filename);
	printf(" (Filename can be specified via program argument)\n");
    }

    /* show the hostname so that we can tell where the processes are running */
    if (VERBOSE_DEF){
	if (gethostname(hostname, 128) < 0){
	    PRINTID;
	    printf("gethostname failed\n");
	    return 1;
	}
	PRINTID;
	printf("hostname=%s\n", hostname);
    }

    /* Delete any old file in order to start anew. */
    /* Must delete because MPI_File_open does not have a Truncate mode. */
    /* Don't care if it has error. */
    MPI_File_delete(filename, MPI_INFO_NULL);
    MPI_Barrier(MPI_COMM_WORLD);	/* prevent racing condition */

    if ((mpi_err = MPI_File_open(MPI_COMM_WORLD, filename,
	    MPI_MODE_RDWR | MPI_MODE_CREATE ,
	    MPI_INFO_NULL, &fh))
	    != MPI_SUCCESS){
	MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	PRINTID;
	printf("MPI_File_open failed (%s)\n", mpi_err_str);
	return 1;
    }

    if (special_request & USEATOM){
	/* ==================================================
	 * Set atomcity to true (1).  A POSIX compliant filesystem
	 * should not need this.
	 * ==================================================*/
	/* Read-modify-read the atomicity flag so the before/after values can
	 * be reported at high verbosity; failures here are reported but are
	 * not fatal to the test. */
	if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS){
	    MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	    PRINTID;
	    printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
	}
	if (VERBOSE_HI)
	    printf("Initial atomicity = %d\n", atomicity);
	if ((mpi_err = MPI_File_set_atomicity(fh, 1)) != MPI_SUCCESS){
	    MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	    PRINTID;
	    printf("MPI_File_set_atomicity failed (%s)\n", mpi_err_str);
	}
	if ((mpi_err = MPI_File_get_atomicity(fh, &atomicity)) != MPI_SUCCESS){
	    MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	    PRINTID;
	    printf("MPI_File_get_atomicity failed (%s)\n", mpi_err_str);
	}
	if (VERBOSE_HI)
	    printf("After set_atomicity atomicity = %d\n", atomicity);
    }

    /* This barrier is not necessary but do it anyway. */
    MPI_Barrier(MPI_COMM_WORLD);
    if (VERBOSE_HI){
	PRINTID;
	printf("between MPI_Barrier and MPI_File_write_at\n");
    }

    /* ==================================================
     * Each process calculates what to write but
     * only process irank(0) writes.
     * ==================================================*/
    irank=0;
    for (i=0; i < DIMSIZE; i++)
	writedata[i] = irank*DIMSIZE + i;
    mpi_off = irank*DIMSIZE;

    /* Only one process writes */
    if (mpi_rank==irank){
	if (VERBOSE_HI){
	    PRINTID; printf("wrote %d bytes at %ld\n", DIMSIZE, (long)mpi_off);
	}
	if ((mpi_err = MPI_File_write_at(fh, mpi_off, writedata, DIMSIZE,
		MPI_BYTE, &mpi_stat))
		!= MPI_SUCCESS){
	    MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	    PRINTID;
	    printf("MPI_File_write_at offset(%ld), bytes (%d), failed (%s)\n",
		    (long) mpi_off, DIMSIZE, mpi_err_str);
	    return 1;
	};
    };

    /* Bcast the return code and */
    /* make sure all writing are done before reading. */
    /* NOTE(review): only the writer's mpi_err is meaningful here; the Bcast
     * also serves as a synchronization point before the readers proceed. */
    MPI_Bcast(&mpi_err, 1, MPI_INT, irank, MPI_COMM_WORLD);
    if (VERBOSE_HI){
	PRINTID;
	printf("MPI_Bcast: mpi_err = %d\n", mpi_err);
    }

    if (special_request & USEFSYNC){
	/* ==================================================
	 * Do a file sync.  A POSIX compliant filesystem
	 * should not need this.
	 * ==================================================*/
	/* sync / barrier / sync: flush the writer's data, wait for everyone,
	 * then sync again so readers see a consistent file. */
	if (VERBOSE_HI)
	    printf("Apply MPI_File_sync\n");
	/* call file_sync to force the write out */
	if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS){
	    MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	    PRINTID;
	    printf("MPI_File_sync failed (%s)\n", mpi_err_str);
	}
	MPI_Barrier(MPI_COMM_WORLD);
	/* call file_sync to force the write out */
	if ((mpi_err = MPI_File_sync(fh)) != MPI_SUCCESS){
	    MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	    PRINTID;
	    printf("MPI_File_sync failed (%s)\n", mpi_err_str);
	}
    }

    /* This barrier is not necessary because the Bcase or File_sync above */
    /* should take care of it.  Do it anyway. */
    MPI_Barrier(MPI_COMM_WORLD);
    if (VERBOSE_HI){
	PRINTID;
	printf("after MPI_Barrier\n");
    }

    /* ==================================================
     * Each process reads what process 0 wrote and verify.
     * ==================================================*/
    irank=0;
    mpi_off = irank*DIMSIZE;
    if ((mpi_err = MPI_File_read_at(fh, mpi_off, readdata, DIMSIZE, MPI_BYTE,
	    &mpi_stat))
	    != MPI_SUCCESS){
	MPI_Error_string(mpi_err, mpi_err_str, &mpi_err_strlen);
	PRINTID;
	printf("MPI_File_read_at offset(%ld), bytes (%d), failed (%s)\n",
		(long) mpi_off, DIMSIZE, mpi_err_str);
	return 1;
    };
    /* Byte-for-byte verification against the pattern the writer produced. */
    for (i=0; i < DIMSIZE; i++){
	expect_val = irank*DIMSIZE + i;
	if (readdata[i] != expect_val){
	    PRINTID;
	    printf("read data[%d:%d] got %02x, expect %02x\n", irank, i,
		    readdata[i], expect_val);
	    nerrs++;
	}
    }

    MPI_File_close(&fh);

    if (VERBOSE_HI){
	PRINTID;
	printf("%d data errors detected\n", nerrs);
    }

    mpi_err = MPI_Barrier(MPI_COMM_WORLD);
    return nerrs;
}
int main(int argc, char **argv) { int errs = 0; MPI_Status status, *status_array = 0; int count = 0, flag, idx, rc, errlen, *indices = 0, outcnt; MPI_Request *reqs = 0; char errmsg[MPI_MAX_ERROR_STRING]; MTest_Init(&argc, &argv); MPI_Comm_set_errhandler(MPI_COMM_WORLD, MPI_ERRORS_RETURN); rc = MPI_Testall(count, reqs, &flag, status_array); if (rc != MPI_SUCCESS) { MPI_Error_string(rc, errmsg, &errlen); printf("MPI_Testall returned failure: %s\n", errmsg); errs++; } else if (!flag) { printf("MPI_Testall(0, ...) did not return a true flag\n"); errs++; } rc = MPI_Waitall(count, reqs, status_array); if (rc != MPI_SUCCESS) { MPI_Error_string(rc, errmsg, &errlen); printf("MPI_Waitall returned failure: %s\n", errmsg); errs++; } rc = MPI_Testany(count, reqs, &idx, &flag, &status); if (rc != MPI_SUCCESS) { MPI_Error_string(rc, errmsg, &errlen); printf("MPI_Testany returned failure: %s\n", errmsg); errs++; } else if (!flag) { printf("MPI_Testany(0, ...) did not return a true flag\n"); errs++; } rc = MPI_Waitany(count, reqs, &idx, &status); if (rc != MPI_SUCCESS) { MPI_Error_string(rc, errmsg, &errlen); printf("MPI_Waitany returned failure: %s\n", errmsg); errs++; } rc = MPI_Testsome(count, reqs, &outcnt, indices, status_array); if (rc != MPI_SUCCESS) { MPI_Error_string(rc, errmsg, &errlen); printf("MPI_Testsome returned failure: %s\n", errmsg); errs++; } rc = MPI_Waitsome(count, reqs, &outcnt, indices, status_array); if (rc != MPI_SUCCESS) { MPI_Error_string(rc, errmsg, &errlen); printf("MPI_Waitsome returned failure: %s\n", errmsg); errs++; } MTest_Finalize(errs); MPI_Finalize(); return 0; }
/*
 * parallel_readwrite: collectively read or write one of the dump buffers
 * to/from `file_name` via MPI-IO.
 *
 *   file_name    - path of the file to open.
 *   dump_buffer  - buffer used only for DUMP_FILE; other file types use the
 *                  corresponding global buffer (gdump_buffer, rdump_buffer, ...).
 *   type_of_file - one of DUMP_FILE/GDUMP_FILE/GDUMP2_FILE/RDUMP_FILE/FDUMP_FILE;
 *                  selects the MPI elementary type, file type and buffer.
 *   is_write     - nonzero to write (create/append), zero to read.
 *   offset       - file offset for the view; if negative, use the current
 *                  position when writing (append semantics) or 0 when reading.
 *
 * Compiled out entirely unless both MPI and DO_PARALLEL_WRITE are set.
 * On open failure the job is aborted; on transfer failure the file is closed
 * and the program exits.  All ranks must call this collectively.
 *
 * NOTE(review): mpi_rank, i_am_the_master, the MPI_*_TYPE /*_file_type /
 * *_buffer / *_buffer_size globals are defined elsewhere — confirm against
 * the owning translation unit.
 */
void parallel_readwrite(char *file_name, void *dump_buffer,
                        int type_of_file, int is_write, long long offset)
{
#if MPI && DO_PARALLEL_WRITE
  MPI_File fh;
  MPI_Status status;
  MPI_Datatype mpi_elementary_type, mpi_file_type;
  int file_open_error, file_write_error ;
  int error_string_length;
  char error_string[BUFSIZ];
  MPI_Offset file_size;
  int count;
  void *mpi_buffer;
  size_t mpi_buffer_size;
  int mode;
  MPI_Offset mpi_offset;

  MPI_Barrier(MPI_COMM_WORLD);

  /* Writers create/append; readers open read-only. */
  if (is_write) {
    mode = MPI_MODE_CREATE | MPI_MODE_WRONLY | MPI_MODE_APPEND;
  }
  else {
    mode = MPI_MODE_RDONLY;
  }

  file_open_error =
    MPI_File_open(MPI_COMM_WORLD, file_name, mode, MPI_INFO_NULL, &fh);

  if (file_open_error != MPI_SUCCESS) {
    MPI_Error_string(file_open_error, error_string, &error_string_length);
    fprintf(stderr, "parallel_readwrite(): error opening file: %3d: %s\n",
            mpi_rank, error_string);
    MPI_Abort(MPI_COMM_WORLD, file_open_error);
    /* It is still OK to abort, because we have failed to open the file. */
  }
  else {
    //    if (i_am_the_master)
    //      chmod(file_name, S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH);

    /* A negative offset means "use the natural position": current file
     * position when appending, start of file when reading. */
    if (offset < 0L) {
      if(is_write) {
        MPI_File_get_position(fh, &mpi_offset);
        offset = mpi_offset;
      }
      else {
        offset = 0L;
      }
    }
    MPI_Barrier(MPI_COMM_WORLD);

    //differentiate data type and buffers involved based on file type
    if( DUMP_FILE == type_of_file ) {
      mpi_elementary_type = MPI_DUMP_TYPE;
      mpi_file_type = dump_file_type;
      mpi_buffer = (void*)dump_buffer;
      mpi_buffer_size = dump_buffer_size;
    }
    else if( GDUMP_FILE == type_of_file){
      mpi_elementary_type = MPI_GDUMP_TYPE;
      mpi_file_type = gdump_file_type;
      mpi_buffer = (void*)gdump_buffer;
      mpi_buffer_size = gdump_buffer_size;
    }
    else if( GDUMP2_FILE == type_of_file){
      mpi_elementary_type = MPI_GDUMP2_TYPE;
      mpi_file_type = gdump2_file_type;
      mpi_buffer = (void*)gdump2_buffer;
      mpi_buffer_size = gdump2_buffer_size;
    }
    else if( RDUMP_FILE == type_of_file){
      mpi_elementary_type = MPI_RDUMP_TYPE;
      mpi_file_type = rdump_file_type;
      mpi_buffer = (void*)rdump_buffer;
      mpi_buffer_size = rdump_buffer_size;
    }
    else if( FDUMP_FILE == type_of_file){
      mpi_elementary_type = MPI_FDUMP_TYPE;
      mpi_file_type = fdump_file_type;
      mpi_buffer = (void*)fdump_buffer;
      mpi_buffer_size = fdump_buffer_size;
    }
    else {
      if(i_am_the_master)
        fprintf(stderr, "Unknown file type %d\n", type_of_file);
      MPI_File_close(&fh);
      MPI_Finalize();
      exit(2);
    }

    MPI_File_set_view(fh, offset, mpi_elementary_type, mpi_file_type,
                      "native", MPI_INFO_NULL);

    /* Collective transfer; note mpi_buffer_size (size_t) is narrowed to the
     * int count parameter of the MPI call. */
    if (is_write) {
      file_write_error =
        MPI_File_write_all(fh, mpi_buffer, mpi_buffer_size, mpi_elementary_type,
                           &status);
    }
    else {
      file_write_error =
        MPI_File_read_all(fh, mpi_buffer, mpi_buffer_size, mpi_elementary_type,
                          &status);
    }
    if (file_write_error != MPI_SUCCESS) {
      MPI_Error_string(file_write_error, error_string, &error_string_length);
      fprintf(stderr, "parallel_readwrite(): error %s file: %3d: %s\n",
              (is_write)?("writing"):("reading"), mpi_rank, error_string);
      MPI_File_close(&fh);
      //if (i_am_the_master) MPI_File_delete(file_name, MPI_INFO_NULL);
      MPI_Finalize();
      exit(1);
    }
    //    MPI_Get_count(&status, MPI_FLOAT, &count);
    //    MPI_File_get_size(fh, &file_size);
    //    if(1) {
    //      printf("%3d: wrote %d floats, expected to write %lld floats\n", mpi_rank, count, (long long int)dump_buffer_size);
    //      printf("%3d: file size is %lld bytes, header-related offset is %lld\n", mpi_rank, file_size, offset);
    //    }
    MPI_File_close(&fh);
  }
#endif
}
int sci_mpi_recv(char *fname, unsigned long fname_len) { SciErr sciErr; int iRet = 0; int *piBuffer = NULL; int iBufferSize = 0; int *piAddr1 = NULL; int *piAddr2 = NULL; double Tag = 0; double Rank = 0; MPI_Status status; CheckInputArgument(pvApiCtx, 2, 2); CheckOutputArgument(pvApiCtx, 1, 1); //Rank sciErr = getVarAddressFromPosition(pvApiCtx, 1, &piAddr1); if (sciErr.iErr) { printError(&sciErr, 0); Scierror(999, _("%s: Can not read input argument #%d.\n"), fname, 1); return 0; } if (getScalarDouble(pvApiCtx, piAddr1, &Rank)) { Scierror(999, _("%s: Wrong type for input argument #%d: A scalar integer value expected.\n"), fname, 1); return 0; } //Tag sciErr = getVarAddressFromPosition(pvApiCtx, 2, &piAddr2); if (sciErr.iErr) { printError(&sciErr, 0); Scierror(999, _("%s: Can not read input argument #%d.\n"), fname, 2); return 0; } if (getScalarDouble(pvApiCtx, piAddr2, &Tag)) { Scierror(999, _("%s: Wrong type for input argument #%d: A scalar integer value expected.\n"), fname, 2); return 0; } //wait message "Rank" node iRet = MPI_Probe((int)Rank, (int)Tag, MPI_COMM_WORLD, &status); if (iRet != MPI_SUCCESS) { char error_string[MPI_MAX_ERROR_STRING]; int length_of_error_string; MPI_Error_string(iRet, error_string, &length_of_error_string); Scierror(999, _("%s: MPI_Probe failed. Rank %d / Tag %d: %s\n"), fname, Rank, Tag, error_string); return 0; } //get data size iRet = MPI_Get_count(&status, MPI_INT, &iBufferSize); if (iRet != MPI_SUCCESS) { char error_string[MPI_MAX_ERROR_STRING]; int length_of_error_string; MPI_Error_string(iRet, error_string, &length_of_error_string); Scierror(999, _("%s: MPI_Get_count failed. 
Rank %d / Tag %d: %s\n"), fname, Rank, Tag, error_string); return 0; } //alloc memory to receive data piBuffer = (int *)MALLOC(sizeof(int) * iBufferSize); if (piBuffer == NULL) { Scierror(999, _("%s: Could not create the received variable.\n"), fname); return 0; } //receive data iRet = MPI_Recv(piBuffer, iBufferSize, MPI_INT, (int)Rank, (int)Tag, MPI_COMM_WORLD, &status); if (iRet != MPI_SUCCESS) { char error_string[MPI_MAX_ERROR_STRING]; int length_of_error_string; MPI_Error_string(iRet, error_string, &length_of_error_string); Scierror(999, _("%s: MPI_Recv failed. Rank %d / Tag %d: %s\n"), fname, Rank, Tag, error_string); return 0; } //convert data from MPI to Scilab iRet = deserialize_from_mpi(pvApiCtx, piBuffer, iBufferSize); FREE(piBuffer); if (iRet) { Scierror(999, _("%s: Unable to deserialize data !\n"), fname); return 0; } AssignOutputVariable(pvApiCtx, 1) = nbInputArgument(pvApiCtx) + 1; ReturnArguments(pvApiCtx); return 0; }
std::string MPI::errorString(int code) { char buf[MPI_MAX_ERROR_STRING]; int length = 0; MPI_Error_string(code, buf, &length); return std::string(buf, length); }
// Tear down everything LibMeshInit set up, in reverse order of
// initialization: synchronize ranks, release singletons and the task
// scheduler, report reference-count leaks, flush performance logging,
// restore redirected streams and the terminate handler, then finalize
// PETSc/SLEPc, VTK's MPI controller, and finally MPI itself — each step
// guarded by the corresponding configuration macro and command-line flag.
LibMeshInit::~LibMeshInit()
{
  // Every processor had better be ready to exit at the same time.
  // This would be a libmesh_parallel_only() function, except that
  // libmesh_parallel_only() uses libmesh_assert() which throws an
  // exception() which causes compilers to scream about exceptions
  // inside destructors.

  // Even if we're not doing parallel_only debugging, we don't want
  // one processor to try to exit until all others are done working.
  this->comm().barrier();

  // We can't delete, finalize, etc. more than once without
  // reinitializing in between
  libmesh_exceptionless_assert(!libMesh::closed());

  // Delete reference counted singleton(s)
  Singleton::cleanup();

  // Clear the thread task manager we started
  task_scheduler.reset();

  // Force the \p ReferenceCounter to print
  // its reference count information.  This allows
  // us to find memory leaks.  By default the
  // \p ReferenceCounter only prints its information
  // when the last created object has been destroyed.
  // That does no good if we are leaking memory!
  ReferenceCounter::print_info ();

  // Print an informative message if we detect a memory leak
  if (ReferenceCounter::n_objects() != 0)
    {
      libMesh::err << "Memory leak detected!"
                   << std::endl;

#if !defined(LIBMESH_ENABLE_REFERENCE_COUNTING) || defined(NDEBUG)
      // Reference counting was compiled out, so the count above cannot
      // say *what* leaked — tell the user how to find out.
      libMesh::err << "Compile in DEBUG mode with --enable-reference-counting"
                   << std::endl
                   << "for more information"
                   << std::endl;
#endif
    }

  // print the perflog to individual processor's file.
  libMesh::perflog.print_log();

  // Now clear the logging object, we don't want it to print
  // a second time during the PerfLog destructor.
  libMesh::perflog.clear();

  // Reconnect the output streams
  // (don't do this, or we will get messages from objects
  //  that go out of scope after the following return)
  //std::cout.rdbuf(std::cerr.rdbuf());

  // Set the initialized() flag to false
  libMeshPrivateData::_is_initialized = false;

  if (libMesh::on_command_line ("--redirect-stdout") ||
      libMesh::on_command_line ("--redirect-output"))
    {
      // If stdout/stderr were redirected to files, reset them now.
      libMesh::out.rdbuf (out_buf);
      libMesh::err.rdbuf (err_buf);
    }

  // If we built our own output streams, we want to clean them up.
  if (libMesh::on_command_line ("--separate-libmeshout"))
    {
      delete libMesh::out.get();
      delete libMesh::err.get();

      libMesh::out.reset(std::cout);
      libMesh::err.reset(std::cerr);
    }

#ifdef LIBMESH_ENABLE_EXCEPTIONS
  // Reset the old terminate handler; maybe the user code wants to
  // keep doing C++ stuff after closing libMesh stuff.
  std::set_terminate(old_terminate_handler);
#endif

  if (libMesh::on_command_line("--enable-fpe"))
    libMesh::enableFPE(false);

#if defined(LIBMESH_HAVE_PETSC)
  // Allow the user to bypass PETSc finalization
  if (!libMesh::on_command_line ("--disable-petsc")
#if defined(LIBMESH_HAVE_MPI)
      && !libMesh::on_command_line ("--disable-mpi")
#endif
      )
    {
      // SLEPc's finalize also finalizes PETSc, so only one of the two
      // is called depending on how we were built.
# if defined(LIBMESH_HAVE_SLEPC)
      if (libmesh_initialized_slepc)
        SlepcFinalize();
# else
      if (libmesh_initialized_petsc)
        PetscFinalize();
# endif
    }
#endif

#if defined(LIBMESH_HAVE_MPI) && defined(LIBMESH_HAVE_VTK)
  // VTK's MPI controller must not finalize MPI itself; we do that below.
  _vtk_mpi_controller->Finalize(/*finalized_externally=*/1);
  _vtk_mpi_controller->Delete();
#endif

#if defined(LIBMESH_HAVE_MPI)
  // Allow the user to bypass MPI finalization
  if (!libMesh::on_command_line ("--disable-mpi"))
    {
      this->_comm.clear();

      // Only finalize MPI if we initialized it (we may have been handed
      // an already-initialized MPI by the caller).
      if (libmesh_initialized_mpi)
        {
          // We can't just libmesh_assert here because destructor,
          // but we ought to report any errors
          unsigned int error_code = MPI_Finalize();
          if (error_code != MPI_SUCCESS)
            {
              char error_string[MPI_MAX_ERROR_STRING+1];
              int error_string_len;
              MPI_Error_string(error_code, error_string,
                               &error_string_len);
              std::cerr << "Failure from MPI_Finalize():\n"
                        << error_string << std::endl;
            }
        }
    }
#endif
}