/* Fortran-binding shim: forwards MPI_FILE_PREALLOCATE to the C routine.
 * Translates the Fortran file handle, passes the offset through, and
 * returns the C error code via *ierr. */
FORT_DLL_SPEC void FORT_CALL mpi_file_preallocate_ ( MPI_Fint *v1, MPI_Offset *v2, MPI_Fint *ierr )
{
#ifdef MPI_MODE_RDONLY
    /* MPI-IO is available: convert the handle and delegate. */
    MPI_File fh = MPI_File_f2c( *v1 );
    *ierr = MPI_File_preallocate( fh, (MPI_Offset)*v2 );
#else
    /* Library was built without MPI-IO support. */
    *ierr = MPI_ERR_INTERN;
#endif
}
/* Fortran interface for MPI_FILE_PREALLOCATE.
 * Converts the Fortran handle to a C MPI_File, calls the C routine,
 * and (if the caller supplied an ierr pointer) stores the translated
 * Fortran error code. */
void ompi_file_preallocate_f(MPI_Fint *fh, MPI_Offset *size, MPI_Fint *ierr)
{
    MPI_File c_fh;
    int rc;

    c_fh = MPI_File_f2c(*fh);
    rc = MPI_File_preallocate(c_fh, (MPI_Offset) *size);

    if (NULL != ierr) {
        *ierr = OMPI_INT_2_FINT(rc);
    }
}
/**
 * Open (and, unless restarting, truncate) one binary evolution file per
 * saved ADC step, collectively on MPI_COMM_WORLD.
 *
 * All ranks must call this: MPI_Bcast, MPI_Barrier and MPI_File_open are
 * collective, so the statement order below must be identical on every rank.
 * Handles are stored in m_files for later MPI_File_write_at calls.
 *
 * @param is_restart  1 = keep existing files (restart run); any other value
 *                    deletes/truncates them first (rank 0 only, then Barrier).
 */
void Mpi2Evolution::OpenFiles(int is_restart){
	World* pW= World::instance();
	// Nothing to do if evolution saving is disabled.
	if (pW->saveEvolStepSize == 0) return;

#ifndef HAVE_MPI_THREADS
	// Without MPI-2 parallel I/O we cannot write evol files in parallel mode.
	if (pW->m_myRank == 0)
		cout << "\n!!! Warning: !!!\nSaving spin evolution in parallel JEMRIS mode requires MPI2.0 (parallel file I/O). no evol files will be written.\nUse MPI2.0 or sequential jemris.\n" << endl;
#else
	// Number of evolution files = total ADC events / save interval.
	long M = pW->TotalADCNumber / pW->saveEvolStepSize ;
	string fname;
	int SpinNo = pW->TotalSpinNumber;
	// Rank 0's values are authoritative; make them consistent on all ranks.
	MPI_Bcast(&is_restart,1,MPI_INT,0,MPI_COMM_WORLD);
	MPI_Bcast(&SpinNo,1,MPI_INT,0,MPI_COMM_WORLD);
	MPI_Offset filesize;
	// Per file: 7 doubles per spin plus a 2-double header region.
	filesize = (SpinNo * 7 +2)* sizeof(double);
	MPI_Status status;
	for (int i=0; i<M; i++) {
		// File name pattern: <base>_NNN.bin with zero-padded 1-based index.
		stringstream sF;
		sF << pW->saveEvolFileName << "_" << setw(3) << setfill('0') << i+1 << ".bin";
		if (is_restart != 1){
			// delete existing old file; (c) by Rolf Rabenseifer...
			/* MPI::File fh=MPI::File::Open(MPI::COMM_WORLD,(sF.str()).c_str(),MPI::MODE_DELETE_ON_CLOSE | MPI::MODE_CREATE | MPI::MODE_WRONLY, MPI::INFO_NULL ); fh.Close();*/
			// above lines lead to trouble on our cluster. try different approach:
			// Rank 0 truncates via plain ofstream; Barrier keeps other ranks
			// from opening the file before truncation is done.
			if (pW->m_myRank == 0) {
				ofstream myfile;
				myfile.open ((sF.str()).c_str(),ios::out | ios::binary| ios::trunc);
				myfile.close();
			}
			MPI_Barrier(MPI_COMM_WORLD);
		}
		// Collective open; every rank keeps its own handle in m_files.
		MPI_File mpifh;
		MPI_File_open (MPI_COMM_WORLD, charstar(sF.str()),MPI_MODE_WRONLY | MPI_MODE_CREATE, MPI_INFO_NULL, &mpifh);
		m_files.push_back(mpifh);
		// Collective: reserve the full file size up front.
		MPI_File_preallocate(m_files[i], filesize);
		// NOTE(review): the header (spin count) is written by rank 1 —
		// presumably because rank 0 is the master and holds no spins;
		// confirm against the master/slave division of labor elsewhere.
		if (pW->m_myRank == 1) {
			double dummy = (double) SpinNo;
			MPI_File_write_at(m_files[i], 0, &dummy, 1, MPI_DOUBLE,&status);
			m_first_write.push_back(true);
		}
	}
#endif
}
/** * \brief Initialization function of measure function * measure_MPI_IO_write_large_file_once(). * * Only one process is active. It writes once to a file. * * Since SKaMPI measurement functions are not allowed to use MPI_Offset * parameters, it is impossible to tell an init_-routine to create a file * which is larger than \f$2^{\mbox{\texttt{sizeof(int)}}-1}-1\f$ directly. As * a preliminary solution we use a parameter (<tt>power</tt>) which commits the * power to 2 as an indicator for the file size. * * Remark concerning the <em>HP XC6000</em>:<br> * Measurements showed that there is no significant difference between MPI-API * and POSIX-API I/O accesses, if files are larger than 1MB. Thus there is no * choice between these two modes like in measure_MPI_IO_read_file_once(), * which makes type compatibilty problems much easier. Only MPI-API is * supported. * * \param[in] power size of memory buffer; 2 to the power of `power' <tt>MPI_BYTE</tt>s * \param[in] create_flag write into existing file (FALSE) or create it (TRUE) * * \return void */ void init_MPI_IO_write_large_file_once (int power, int create_flag) { MPI_Offset size; char *error_string; io_filename = get_io_filename (IO_FILENAME, 0); if (get_measurement_rank () == 0){ if (power > MAXIMUM_POWER || power < 0){ error_string = strerror (EINVAL); error_with_abort (errno, "\ninit_MPI_IO_write_large_file_once (int %d, int %d) failed." "\nInvalid power argument." 
"\nError: %s\n", power, create_flag, error_string); } size = ((MPI_Offset) 1) << power; if (create_flag == 0){ MPI_File_open (MPI_COMM_SELF, io_filename, MPI_MODE_WRONLY | MPI_MODE_CREATE | MPI_MODE_UNIQUE_OPEN, MPI_INFO_NULL, &io_fh); MPI_File_preallocate (io_fh, size); MPI_File_close (&io_fh); } set_send_buffer_usage (size); set_reported_message_size (size); } MPI_Barrier (get_measurement_comm ()); /* set synchronization type: SYNC_BARRIER if all SKaMPI processes run on one physical processor SYNC_REAL if every SKaMPI process runs on its own physical processor */ set_synchronization (SYNC_REAL); init_synchronization (); }
/* JNI bridge for mpi.File.preallocate(long size).
 * The jlong handle is the native MPI_File; any MPI error code is turned
 * into a Java exception by ompi_java_exceptionCheck. */
JNIEXPORT void JNICALL Java_mpi_File_preallocate(
        JNIEnv *env, jobject jthis, jlong fh, jlong size)
{
    ompi_java_exceptionCheck(
            env, MPI_File_preallocate((MPI_File)fh, (MPI_Offset)size));
}
/*
 * Open a file through the MPIIO interface. Setup file view.
 *
 * Translates IOR open flags to MPI-IO mode bits, applies user hints,
 * optionally preallocates the file for writes, and (if requested) builds
 * a strided subarray file view. Returns a malloc'd MPI_File* cast to void*.
 */
static void *MPIIO_Open(char *testFileName, IOR_param_t * param)
{
        int fd_mode = (int)0, offsetFactor, tasksPerFile, transfersPerBlock =
            param->blockSize / param->transferSize;
        struct fileTypeStruct {
                int globalSizes[2], localSizes[2], startIndices[2];
        } fileTypeStruct;
        MPI_File *fd;
        MPI_Comm comm;
        MPI_Info mpiHints = MPI_INFO_NULL;

        fd = (MPI_File *) malloc(sizeof(MPI_File));
        if (fd == NULL)
                ERR("malloc failed()");
        *fd = 0;

        /* set IOR file flags to MPIIO flags */
        /* -- file open flags -- */
        if (param->openFlags & IOR_RDONLY) {
                fd_mode |= MPI_MODE_RDONLY;
        }
        if (param->openFlags & IOR_WRONLY) {
                fd_mode |= MPI_MODE_WRONLY;
        }
        if (param->openFlags & IOR_RDWR) {
                fd_mode |= MPI_MODE_RDWR;
        }
        if (param->openFlags & IOR_APPEND) {
                fd_mode |= MPI_MODE_APPEND;
        }
        if (param->openFlags & IOR_CREAT) {
                fd_mode |= MPI_MODE_CREATE;
        }
        if (param->openFlags & IOR_EXCL) {
                fd_mode |= MPI_MODE_EXCL;
        }
        if (param->openFlags & IOR_TRUNC) {
                fprintf(stdout, "File truncation not implemented in MPIIO\n");
        }
        if (param->openFlags & IOR_DIRECT) {
                fprintf(stdout, "O_DIRECT not implemented in MPIIO\n");
        }

        /*
         * MPI_MODE_UNIQUE_OPEN mode optimization eliminates the overhead of file
         * locking.  Only open a file in this mode when the file will not be con-
         * currently opened elsewhere, either inside or outside the MPI environment.
         */
        fd_mode |= MPI_MODE_UNIQUE_OPEN;

        if (param->filePerProc) {
                comm = MPI_COMM_SELF;
        } else {
                comm = testComm;
        }

        SetHints(&mpiHints, param->hintsFileName);
        /*
         * note that with MP_HINTS_FILTERED=no, all key/value pairs will
         * be in the info object.  The info object that is attached to
         * the file during MPI_File_open() will only contain those pairs
         * deemed valid by the implementation.
         */
        /* show hints passed to file */
        if (rank == 0 && param->showHints) {
                fprintf(stdout, "\nhints passed to MPI_File_open() {\n");
                ShowHints(&mpiHints);
                fprintf(stdout, "}\n");
        }
        MPI_CHECK(MPI_File_open(comm, testFileName, fd_mode, mpiHints, fd),
                  "cannot open file");

        /* show hints actually attached to file handle */
        if (rank == 0 && param->showHints) {
                /* note: this replaces mpiHints with a fresh info object */
                MPI_CHECK(MPI_File_get_info(*fd, &mpiHints),
                          "cannot get file info");
                fprintf(stdout, "\nhints returned from opened file {\n");
                ShowHints(&mpiHints);
                fprintf(stdout, "}\n");
        }

        /* BUGFIX: the hints info object was never released after the open
         * (MPI_File_open keeps its own copy), which leaked one MPI_Info per
         * open.  Free our handle now that it is no longer needed. */
        if (mpiHints != MPI_INFO_NULL) {
                MPI_CHECK(MPI_Info_free(&mpiHints), "cannot free file info");
        }

        /* preallocate space for file */
        if (param->preallocate && param->open == WRITE) {
                MPI_CHECK(MPI_File_preallocate(*fd,
                                               (MPI_Offset) (param->segmentCount
                                                             *
                                                             param->blockSize *
                                                             param->numTasks)),
                          "cannot preallocate file");
        }

        /* create file view */
        if (param->useFileView) {
                /* create contiguous transfer datatype
                 * (BUGFIX: restored "&param->..." — the source had been
                 * HTML-mangled to "¶m->..." and did not compile) */
                MPI_CHECK(MPI_Type_contiguous
                          (param->transferSize / sizeof(IOR_size_t),
                           MPI_LONG_LONG_INT, &param->transferType),
                          "cannot create contiguous datatype");
                MPI_CHECK(MPI_Type_commit(&param->transferType),
                          "cannot commit datatype");
                if (param->filePerProc) {
                        offsetFactor = 0;
                        tasksPerFile = 1;
                } else {
                        offsetFactor = (rank + rankOffset) % param->numTasks;
                        tasksPerFile = param->numTasks;
                }

                /*
                 * create file type using subarray
                 */
                fileTypeStruct.globalSizes[0] = 1;
                fileTypeStruct.globalSizes[1] = transfersPerBlock * tasksPerFile;
                fileTypeStruct.localSizes[0] = 1;
                fileTypeStruct.localSizes[1] = transfersPerBlock;
                fileTypeStruct.startIndices[0] = 0;
                fileTypeStruct.startIndices[1] = transfersPerBlock * offsetFactor;

                MPI_CHECK(MPI_Type_create_subarray
                          (2, fileTypeStruct.globalSizes,
                           fileTypeStruct.localSizes,
                           fileTypeStruct.startIndices, MPI_ORDER_C,
                           param->transferType, &param->fileType),
                          "cannot create subarray");
                MPI_CHECK(MPI_Type_commit(&param->fileType),
                          "cannot commit datatype");

                MPI_CHECK(MPI_File_set_view(*fd, (MPI_Offset) 0,
                                            param->transferType,
                                            param->fileType, "native",
                                            (MPI_Info) MPI_INFO_NULL),
                          "cannot set file view");
        }
        return ((void *)fd);
}
int main(int argc, char **argv) { int buf[1024], amode, flag, mynod, len, i; MPI_File fh; MPI_Status status; MPI_Datatype newtype; MPI_Offset disp, offset; MPI_Group group; MPI_Datatype etype, filetype; char datarep[25], *filename; MPI_Init(&argc,&argv); MPI_Comm_rank(MPI_COMM_WORLD, &mynod); /* process 0 takes the file name as a command-line argument and broadcasts it to other processes */ if (!mynod) { i = 1; while ((i < argc) && strcmp("-fname", *argv)) { i++; argv++; } if (i >= argc) { printf("\n*# Usage: misc <mpiparameter> -- -fname filename\n\n"); MPI_Abort(MPI_COMM_WORLD, 1); } argv++; len = strlen(*argv); filename = (char *) malloc(len+1); strcpy(filename, *argv); MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD); MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD); } else { MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD); filename = (char *) malloc(len+1); MPI_Bcast(filename, len+1, MPI_CHAR, 0, MPI_COMM_WORLD); } MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh); MPI_File_write(fh, buf, 1024, MPI_INT, &status); MPI_File_sync(fh); MPI_File_get_amode(fh, &amode); if (!mynod) printf("testing MPI_File_get_amode\n"); if (amode != (MPI_MODE_CREATE | MPI_MODE_RDWR)) printf("amode is %d, should be %d\n\n", amode, MPI_MODE_CREATE | MPI_MODE_RDWR); MPI_File_get_atomicity(fh, &flag); if (flag) printf("atomicity is %d, should be 0\n", flag); if (!mynod) printf("setting atomic mode\n"); MPI_File_set_atomicity(fh, 1); MPI_File_get_atomicity(fh, &flag); if (!flag) printf("atomicity is %d, should be 1\n", flag); MPI_File_set_atomicity(fh, 0); if (!mynod) printf("reverting back to nonatomic mode\n"); MPI_Type_vector(10, 10, 20, MPI_INT, &newtype); MPI_Type_commit(&newtype); MPI_File_set_view(fh, 1000, MPI_INT, newtype, "native", MPI_INFO_NULL); if (!mynod) printf("testing MPI_File_get_view\n"); MPI_File_get_view(fh, &disp, &etype, &filetype, datarep); if ((disp != 1000) || strcmp(datarep, "native")) printf("disp = %I64, datarep 
= %s, should be 1000, native\n\n", disp, datarep); if (!mynod) printf("testing MPI_File_get_byte_offset\n"); MPI_File_get_byte_offset(fh, 10, &disp); if (disp != (1000+20*sizeof(int))) printf("byte offset = %I64, should be %d\n\n", disp, (int) (1000+20*sizeof(int))); MPI_File_get_group(fh, &group); if (!mynod) printf("testing MPI_File_set_size\n"); MPI_File_set_size(fh, 1000+15*sizeof(int)); MPI_Barrier(MPI_COMM_WORLD); MPI_File_sync(fh); MPI_File_get_size(fh, &disp); if (disp != 1000+15*sizeof(int)) printf("file size = %I64, should be %d\n\n", disp, (int) (1000+15*sizeof(int))); if (!mynod) printf("seeking to eof and testing MPI_File_get_position\n"); MPI_File_seek(fh, 0, MPI_SEEK_END); MPI_File_get_position(fh, &disp); if (disp != 10) printf("file pointer posn = %I64, should be 10\n\n", disp); if (!mynod) printf("testing MPI_File_get_byte_offset\n"); MPI_File_get_byte_offset(fh, disp, &offset); if (offset != (1000+20*sizeof(int))) printf("byte offset = %I64, should be %d\n\n", offset, (int) (1000+20*sizeof(int))); MPI_Barrier(MPI_COMM_WORLD); if (!mynod) printf("testing MPI_File_seek with MPI_SEEK_CUR\n"); MPI_File_seek(fh, -10, MPI_SEEK_CUR); MPI_File_get_position(fh, &disp); MPI_File_get_byte_offset(fh, disp, &offset); if (offset != 1000) printf("file pointer posn in bytes = %I64, should be 1000\n\n", offset); if (!mynod) printf("preallocating disk space up to 8192 bytes\n"); MPI_File_preallocate(fh, 8192); if (!mynod) printf("closing the file and deleting it\n"); MPI_File_close(&fh); MPI_Barrier(MPI_COMM_WORLD); if (!mynod) MPI_File_delete(filename, MPI_INFO_NULL); MPI_Type_free(&newtype); MPI_Type_free(&filetype); MPI_Group_free(&group); free(filename); MPI_Finalize(); return 0; }
int main (int argc, char **argv) { MPI_Request request; MPI_File fh; MPI_Datatype ftype; MPI_Offset offset; MPI_Status status; int rank, wsize, fsize, i; char file_name[128]; int buf[BUF_SIZE * TEST_OPS]; int count; MPI_Init (&argc, &argv); MPI_Comm_size (MPI_COMM_WORLD, &wsize); MPI_Comm_rank (MPI_COMM_WORLD, &rank); strcpy (file_name, argv[0]); strcat (file_name, ".tmp"); MPI_File_open (MPI_COMM_WORLD, file_name, MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fh); fsize = wsize * BUF_SIZE * TEST_OPS; MPI_File_preallocate (fh, fsize); memset (buf, 0, BUF_SIZE * TEST_OPS); offset = 0; count = BLOCK_SIZE; for (i = 0; i < TEST_OPS; i++) { offset = i * BLOCK_SIZE + (rank * BLOCK_SIZE * TEST_OPS); MPI_File_seek (fh, offset, MPI_SEEK_SET); MPI_File_write (fh, buf, count, MPI_INT, &status); MPI_File_seek (fh, offset, MPI_SEEK_SET); MPI_File_read (fh, buf, count, MPI_INT, &status); } for (i = 0; i < TEST_OPS; i++) { offset = i * BLOCK_SIZE + (rank * BLOCK_SIZE * TEST_OPS); MPI_File_write_at (fh, offset, buf, count, MPI_INT, &status); MPI_File_read_at (fh, offset, buf, count, MPI_INT, &status); } MPI_Type_vector (fsize / BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE * wsize, MPI_INT, &ftype); MPI_Type_commit (&ftype); offset = rank * BLOCK_SIZE * TEST_OPS; count = BLOCK_SIZE * TEST_OPS; MPI_File_set_view (fh, offset, MPI_INT, ftype, "native", MPI_INFO_NULL); MPI_File_write_all (fh, buf, count, MPI_INT, &status); MPI_File_read_all (fh, buf, count, MPI_INT, &status); MPI_File_close (&fh); MPI_Finalize (); }