Example #1
void ReadCombinedParallelFile(ug::BinaryBuffer &buffer, std::string strFilename, pcl::ProcessCommunicator pc)
{
	MPI_Status status;
	MPI_Comm m_mpiComm = pc.get_mpi_communicator();
	MPI_File fh;

	char filename[1024];
	strcpy(filename, strFilename.c_str());
	if(MPI_File_open(m_mpiComm, filename, MPI_MODE_CREATE | MPI_MODE_RDWR, MPI_INFO_NULL, &fh))
		UG_THROW("could not open "<<filename);

	std::vector<int> allNextOffsets;
	allNextOffsets.resize(pc.size()+1);

	allNextOffsets[0] = (pc.size()+1)*sizeof(int);
	bool bFirst = pc.get_proc_id(0) == pcl::ProcRank();
	if(bFirst)
	{
		int numProcs;
		MPI_File_read(fh, &numProcs, sizeof(numProcs), MPI_BYTE, &status);
		UG_COND_THROW(numProcs != pcl::NumProcs(), "checkPoint numProcs = " << numProcs << ", but running on " << pcl::NumProcs());

		for(size_t i=1; i<allNextOffsets.size(); i++)
		{
			MPI_File_read(fh, &allNextOffsets[i], sizeof(allNextOffsets[i]), MPI_BYTE, &status);
//			UG_LOG("allNextOffsets[" << i << "] = " << allNextOffsets[i] << "\n");
		}
	}
	int myNextOffset, myNextOffset2;
	MPI_Scatter(&allNextOffsets[0], 1, MPI_INT, &myNextOffset, 1, MPI_INT, pc.get_proc_id(0), m_mpiComm);
	MPI_Scatter(&allNextOffsets[1], 1, MPI_INT, &myNextOffset2, 1, MPI_INT, pc.get_proc_id(0), m_mpiComm);

	int mySize = myNextOffset2-myNextOffset;

//	UG_LOG_ALL_PROCS("MySize = " << mySize << "\n" << "myNextOffset = " << myNextOffset << " - " << myNextOffset2 << "\n");

	MPI_File_seek(fh, myNextOffset, MPI_SEEK_SET);

	char *p = new char[mySize];
	MPI_File_read(fh, p, mySize, MPI_BYTE, &status);
	buffer.clear();
	buffer.reserve(mySize);
	buffer.write(p, mySize);
	delete[] p;

	MPI_File_close(&fh);
	//	UG_LOG("File read.\n");
}
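The reader above assumes a specific combined-file layout: a header of (numProcs+1) ints holding the process count and each rank's end offset, followed by every rank's payload in rank order. Below is a minimal writer sketch that would produce such a layout; it is an illustration only, and the function name, the plain char* interface, and the use of MPI_Gather/MPI_Scatter for the offset table are assumptions rather than part of the original ug4 code.

/* Hedged sketch, not from the original sources: write the combined-file
 * layout that ReadCombinedParallelFile() above expects. */
#include <mpi.h>
#include <stdlib.h>

void WriteCombinedParallelFile(const char *data, int mySize,
                               const char *filename, MPI_Comm comm)
{
    int rank, numProcs;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &numProcs);

    /* Gather every rank's payload size on rank 0. */
    int *sizes = NULL;
    if (rank == 0)
        sizes = (int *)malloc(numProcs * sizeof(int));
    MPI_Gather(&mySize, 1, MPI_INT, sizes, 1, MPI_INT, 0, comm);

    /* Rank 0 turns the sizes into end offsets; the header itself
     * occupies (numProcs+1) ints at the start of the file. */
    int *nextOffsets = NULL;
    if (rank == 0) {
        nextOffsets = (int *)malloc(numProcs * sizeof(int));
        int off = (numProcs + 1) * (int)sizeof(int);
        for (int i = 0; i < numProcs; i++) {
            off += sizes[i];
            nextOffsets[i] = off;          /* end offset of rank i's block */
        }
    }

    /* Every rank learns where its own block ends, hence where it starts. */
    int myNextOffset;
    MPI_Scatter(nextOffsets, 1, MPI_INT, &myNextOffset, 1, MPI_INT, 0, comm);
    int myOffset = myNextOffset - mySize;

    MPI_File fh;
    MPI_Status status;
    MPI_File_open(comm, filename, MPI_MODE_CREATE | MPI_MODE_WRONLY,
                  MPI_INFO_NULL, &fh);

    if (rank == 0) {   /* header: numProcs, then the numProcs end offsets */
        MPI_File_write(fh, &numProcs, sizeof(numProcs), MPI_BYTE, &status);
        MPI_File_write(fh, nextOffsets, numProcs * sizeof(int), MPI_BYTE, &status);
        free(sizes);
        free(nextOffsets);
    }

    MPI_File_seek(fh, myOffset, MPI_SEEK_SET);
    MPI_File_write(fh, data, mySize, MPI_BYTE, &status);
    MPI_File_close(&fh);
}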
Example #2
File: localio.c  Project: jonarbo/KUBE
/**
 * \brief Measures the time to read once from a large file.
 *
 * Only one process is active. It reads once from a large file.
 * 
 * Since SKaMPI measurement functions are not allowed to use MPI_Offset
 * parameters, it is impossible to tell an init_-routine directly to create a
 * file larger than \f$2^{8\cdot\mbox{\texttt{sizeof(int)}}-1}-1\f$ bytes. As
 * a preliminary solution we use a parameter (<tt>power</tt>) which passes the
 * exponent of 2 that determines the file size.
 *
 * Remark concerning the <em>HP XC6000</em>:<br>
 * Measurements showed no significant difference between MPI-API and
 * POSIX-API I/O accesses for files larger than 1 MB. Thus there is no
 * choice between these two modes as in measure_MPI_IO_read_file_once(),
 * which makes type compatibility problems much easier to avoid. Only the
 * MPI-API is supported.
 * 
 * \param[in] power size of memory buffer; 2 to the power of `power' <tt>MPI_BYTE</tt>s
 *
 * \return    measured time 
 */
double measure_MPI_IO_read_large_file_once (int power){
  MPI_Offset size;
  double     start_time = 1.0, end_time = 0.0;
  
  if (get_measurement_rank () == 0){
    
    size = ((MPI_Offset) 1) << power;

    MPI_File_open (MPI_COMM_SELF, io_filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &io_fh);
    MPI_File_set_view (io_fh, (MPI_Offset)0, 
		       MPI_BYTE, MPI_BYTE,
		       "native", MPI_INFO_NULL);
    
    start_time = start_synchronization ();
    MPI_File_read (io_fh, get_recv_buffer (), size, MPI_BYTE, MPI_STATUS_IGNORE);
    end_time = stop_synchronization ();
    
    MPI_File_close (&io_fh);
  }
  else if (get_measurement_rank () != 0) {
    start_synchronization ();
  }
  stop_synchronization ();

  if (get_measurement_rank () == 0)
    return end_time - start_time;
  else
    return -1.0;

}
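For reference, a short worked example of the size arithmetic behind the power parameter: a plain int parameter can carry at most 2^31 - 1, while shifting an MPI_Offset expresses 4 GiB and beyond. The snippet below is illustrative only and is not SKaMPI code.

/* Illustrative arithmetic only (not SKaMPI code): how the `power' parameter
 * maps to a file size that would not fit in a plain int. */
#include <mpi.h>
#include <stdio.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int power = 32;                              /* requested exponent */
    MPI_Offset size = ((MPI_Offset)1) << power;  /* 2^32 = 4 GiB       */

    /* 2^31 - 1 is the largest value a 32-bit int can hold, so any power
     * of 31 or more cannot be passed through an int parameter directly. */
    printf("power = %d -> size = %lld bytes (INT_MAX is %d)\n",
           power, (long long)size, 2147483647);

    MPI_Finalize();
    return 0;
}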
Example #3
void seissol::checkpoint::mpio::Wavefield::load(double &time, int &timestepWaveField)
{
	logInfo(rank()) << "Loading wave field checkpoint";

	seissol::checkpoint::CheckPoint::load();

	MPI_File file = open();
	if (file == MPI_FILE_NULL)
		logError() << "Could not open checkpoint file";

	// Read and broadcast header
	checkMPIErr(setHeaderView(file));

	Header header;
	if (rank() == 0)
		checkMPIErr(MPI_File_read(file, &header, 1, headerType(), MPI_STATUS_IGNORE));

	MPI_Bcast(&header, 1, headerType(), 0, comm());
	time = header.time;
	timestepWaveField = header.timestepWavefield;

	// Read dofs
	checkMPIErr(setDataView(file));
	checkMPIErr(MPI_File_read_all(file, dofs(), numDofs(), MPI_DOUBLE, MPI_STATUS_IGNORE));

	// Close the file
	checkMPIErr(MPI_File_close(&file));
}
Example #4
bool seissol::checkpoint::mpio::Wavefield::validate(MPI_File file) const
{
	if (setHeaderView(file) != 0) {
		logWarning() << "Could not set checkpoint header view";
		return false;
	}

	int result = true;

	if (rank() == 0) {
		Header header;

		// Check the header
		MPI_File_read(file, &header, 1, headerType(), MPI_STATUS_IGNORE);

		if (header.identifier != identifier()) {
			logWarning() << "Checkpoint identifier does match";
			result = false;
		} else if (header.partitions != partitions()) {
			logWarning() << "Number of partitions in checkpoint does not match";
			result = false;
		}
	}

	// Make sure everybody knows the result of the validation
	MPI_Bcast(&result, 1, MPI_INT, 0, comm());

	return result;
}
Example #5
void seissol::checkpoint::mpio::Wavefield::load(real* dofs)
{
	logInfo(rank()) << "Loading wave field checkpoint";

	seissol::checkpoint::CheckPoint::setLoaded();

	MPI_File file = open();
	if (file == MPI_FILE_NULL)
		logError() << "Could not open checkpoint file";

	// Read and broadcast header
	checkMPIErr(setHeaderView(file));

	if (rank() == 0)
		checkMPIErr(MPI_File_read(file, header().data(), 1, headerType(), MPI_STATUS_IGNORE));

	MPI_Bcast(header().data(), 1, headerType(), 0, comm());

	// Read dofs
	checkMPIErr(setDataView(file));
	checkMPIErr(MPI_File_read_all(file, dofs, numDofs(), MPI_DOUBLE, MPI_STATUS_IGNORE));

	// Close the file
	checkMPIErr(MPI_File_close(&file));
}
Example #6
void leitura()
{
	int i, ret;
	buf_leitura = (int*) malloc (TAMBUF*sizeof(int));

	ret = MPI_File_open(	MPI_COMM_WORLD, "arquivo.dat", 
							MPI_MODE_RDONLY, 
							MPI_INFO_NULL, &arquivo);
	if (ret == 0)
		printf("Arquivo aberto com sucesso no processo %d \n", meu_ranque);
	else 
	{
		printf("Arquivo aberto com erro no processo %d \n", meu_ranque);
		MPI_Abort(MPI_COMM_WORLD, 1);
	}
	
	MPI_File_set_view(	arquivo, meu_ranque*TAMBUF*sizeof(int),
						MPI_INT, MPI_INT, "native", MPI_INFO_NULL);

	MPI_File_read(		arquivo, buf_leitura, 
						TAMBUF, MPI_INT,
						MPI_STATUS_IGNORE);

	for (i = 0; i < TAMBUF; ++i)
		printf("%d\n", buf_leitura[i]);
}
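TAMBUF, meu_ranque, arquivo and buf_leitura are globals defined elsewhere in the original program. A hedged sketch of the surrounding scaffolding that would make leitura() runnable on its own is given below; the TAMBUF value and the cleanup in main are assumptions, not the original code.

/* Assumed scaffolding for leitura(), not from the original project. */
#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

#define TAMBUF 16              /* ints read per process (assumed value) */

int meu_ranque;                 /* this process's rank                  */
MPI_File arquivo;               /* file handle used by leitura()        */
int *buf_leitura;               /* read buffer allocated in leitura()   */

void leitura(void);             /* defined above                        */

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &meu_ranque);

    leitura();                  /* each rank reads its TAMBUF-int slice */

    MPI_File_close(&arquivo);
    free(buf_leitura);
    MPI_Finalize();
    return 0;
}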
Example #7
bool
NodePartitionedMeshReader::readBinaryDataFromFile(std::string const& filename,
    MPI_Offset offset, MPI_Datatype type, DATA& data) const
{
    // Check container size
    if (!is_safely_convertable<std::size_t, int>(data.size()))
    {
        ERR("The container size is too large for MPI_File_read() call.");
        return false;
    }

    // Open file
    MPI_File file;

    char* filename_char = const_cast<char*>(filename.data());
    int const file_status = MPI_File_open(_mpi_comm, filename_char,
            MPI_MODE_RDONLY, MPI_INFO_NULL, &file);

    if(file_status != 0)
    {
        ERR("Error opening file %s. MPI error code %d", filename.c_str(), file_status);
        return false;
    }

    // Read data
    char file_mode[] = "native";
    MPI_File_set_view(file, offset, type, type, file_mode, MPI_INFO_NULL);
    // The static cast is checked above.
    MPI_File_read(file, data.data(), static_cast<int>(data.size()), type,
        MPI_STATUS_IGNORE);
    MPI_File_close(&file);

    return true;
}
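is_safely_convertable is a helper from the surrounding OGS code base. As a hedged sketch only (the real implementation may differ), a check with the same intent could look like this:

// Hedged sketch, not the actual OGS implementation: verify that a value of
// the source type fits into the target type before the narrowing cast that
// MPI_File_read's int count argument forces on the caller.
#include <limits>

template <typename SRC, typename TGT>
bool is_safely_convertable(SRC const value)
{
    TGT const max = std::numeric_limits<TGT>::max();
    return value <= static_cast<SRC>(max);
}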
Example #8
File: readf.c  Project: davidheryanto/sc14
FORTRAN_API void FORT_CALL mpi_file_read_(MPI_Fint *fh,void *buf,int *count,
                  MPI_Datatype *datatype,MPI_Status *status, int *ierr )
{
    MPI_File fh_c;
    
    fh_c = MPI_File_f2c(*fh);
    *ierr = MPI_File_read(fh_c,buf,*count,*datatype,status);
}
Example #9
File: localio.c  Project: jonarbo/KUBE
/**
 * \brief Measures the time to read once from a file.
 *
 * Only one process is active. It reads once from a file.
 *
 * Remark:<br>
 * With the <tt>O_DIRECT</tt> flag set, cache effects are minimized, because I/O
 * is done directly to/from user-space buffers. The operating system's page
 * cache is bypassed. Under Linux 2.6 alignment to 512-byte boundaries is
 * required for buffer and file offset. Thus the following parameters should be
 * set in a SKaMPI input file:
 * - <tt>set_send_buffert_alignment (512)</tt>
 * - <tt>set_recv_buffert_alignment (512)</tt>
 * - <tt>switch_buffer_cycling_off ()</tt><br>
 *
 * <tt>O_DIRECT</tt> is only relevant if the POSIX-API is used for I/O.
 * 
 * For more information please refer to the <tt>open ()</tt> man pages.
 *
 * \param[in] size  size of memory buffer, i.e. number of <tt>MPI_BYTE</tt>s
 * \param[in] api   POSIX-API or MPI-API for I/O accesses
 * \param[in] directio_flag open file with <tt>O_DIRECT</tt> flag to minimize
 *                          cache effects
 *
 * \return    measured time 
 */
double measure_MPI_IO_read_file_once (int size, char *api, int directio_flag){
  double     start_time = 1.0, end_time = 0.0;
  int        open_flags;
  char       *error_string;
  
  if (get_measurement_rank () == 0){
    if (strcmp (api, POSIX_API) == 0){ 

      if (directio_flag != 0)
	open_flags = O_RDONLY | O_DIRECT;
      else
	open_flags = O_RDONLY;

      printf ("flags %d,%d\n", open_flags, O_DIRECT);

      errno = 0;
      if ((io_fd = open (io_filename, open_flags)) < 0){
	error_string = strerror (errno);
	error_with_abort (errno,
			  "\nmeasure_MPI_IO_read_file_once (int %d, char * %s, int %d) failed."
			  "\nCannot open local file (read only mode)."
			  "\nError: %s\n",
			  size, api, directio_flag, error_string);
      }
    
      start_time = start_synchronization ();
      read (io_fd, get_recv_buffer (), size);
      end_time = MPI_Wtime ();

      close (io_fd);

    }
    else{
      MPI_File_open (MPI_COMM_SELF, io_filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &io_fh);
      MPI_File_set_view (io_fh, (MPI_Offset)0, 
			 MPI_BYTE, MPI_BYTE,
			 "native", MPI_INFO_NULL);

      start_time = start_synchronization ();
      MPI_File_read (io_fh, get_recv_buffer (), size, MPI_BYTE, MPI_STATUS_IGNORE);
      end_time = stop_synchronization ();

      MPI_File_close (&io_fh);

    }
  }
  else if (get_measurement_rank () != 0) {
    start_synchronization ();
  }
  stop_synchronization ();

  if (get_measurement_rank () == 0)
    return end_time - start_time;
  else
    return -1.0;

}
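The 512-byte alignment mentioned above matters because read() on a file opened with O_DIRECT typically fails with EINVAL when the buffer or the transfer size is unaligned. Below is a minimal stand-alone sketch of allocating a suitably aligned buffer with posix_memalign; it is not SKaMPI code and the file name is made up.

/* Hedged sketch: allocate a 512-byte-aligned buffer suitable for O_DIRECT
 * reads, as required on Linux 2.6 for direct I/O. Not SKaMPI code. */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
    size_t size = 1 << 20;            /* 1 MiB, itself a multiple of 512 */
    void *buf = NULL;

    if (posix_memalign(&buf, 512, size) != 0) {
        perror("posix_memalign");
        return 1;
    }

    int fd = open("testfile", O_RDONLY | O_DIRECT);
    if (fd < 0) {
        perror("open");
        free(buf);
        return 1;
    }

    /* Both the buffer address and the transfer size are 512-byte aligned. */
    ssize_t got = read(fd, buf, size);
    printf("read %zd bytes\n", got);

    close(fd);
    free(buf);
    return 0;
}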
Example #10
//***************************************************************************************************************
void ChimeraCheckRDP::readName(string namefile) {
	try{
	
		string name;

	#ifdef USE_MPI
		
		MPI_File inMPI;
		MPI_Offset size;
		MPI_Status status;

		//char* inFileName = new char[namefile.length()];
		//memcpy(inFileName, namefile.c_str(), namefile.length());
		
		char inFileName[1024];
		strcpy(inFileName, namefile.c_str());

		MPI_File_open(MPI_COMM_WORLD, inFileName, MPI_MODE_RDONLY, MPI_INFO_NULL, &inMPI);  
		MPI_File_get_size(inMPI, &size);

		//delete inFileName;

		// allocate one extra byte so the buffer can be null-terminated
		char* buffer = new char[size+1];
		MPI_File_read(inMPI, buffer, size, MPI_CHAR, &status);
		buffer[size] = '\0';

		string tempBuf = buffer;
		if (tempBuf.length() > size) { tempBuf = tempBuf.substr(0, size);  }
		istringstream iss (tempBuf,istringstream::in);
		delete [] buffer;
		
		while(!iss.eof()) {
			iss >> name; m->gobble(iss);
			names[name] = name;
		}
	
		MPI_File_close(&inMPI);
		
	#else	
	
		ifstream in;
		m->openInputFile(namefile, in);
				
		while (!in.eof()) {
			in >> name; m->gobble(in);
			names[name] = name;
		}
		in.close();
	
	#endif
	
	}
	catch(exception& e) {
		m->errorOut(e, "ChimeraCheckRDP", "readName");
		exit(1);
	}
}
Example #11
File: readf.c  Project: davidheryanto/sc14
void mpi_file_read_(MPI_Fint *fh,void *buf,int *count,
                  MPI_Fint *datatype,MPI_Status *status, int *ierr )
{
    MPI_File fh_c;
    MPI_Datatype datatype_c;
    
    fh_c = MPI_File_f2c(*fh);
    datatype_c = MPI_Type_f2c(*datatype);

    *ierr = MPI_File_read(fh_c,buf,*count,datatype_c,status);
}
Example #12
File: mpi_File.c  Project: urids/XSCALAMPI
JNIEXPORT void JNICALL Java_mpi_File_read(
        JNIEnv *env, jobject jthis, jlong fh, jobject buf, jboolean db,
        jint off, jint count, jlong jType, jint bType, jlongArray stat)
{
    MPI_Datatype type = (MPI_Datatype)jType;
    void *ptr;
    ompi_java_buffer_t *item;
    ompi_java_getWritePtr(&ptr, &item, env, buf, db, count, type);
    MPI_Status status;
    int rc = MPI_File_read((MPI_File)fh, ptr, count, type, &status);
    ompi_java_exceptionCheck(env, rc);
    ompi_java_releaseWritePtr(ptr, item, env, buf, db, off, count, type, bType);
    ompi_java_status_set(env, stat, &status);
}
Example #13
/* parallel_fread()    
 * Wrapper for fread using MPI routines. Put a frame into the frame buffer.
 * To be consistent with older ptraj routines return 0 on error, 1 on success. 
 */
int parallel_fread(coordinateInfo *C) {
#ifdef MPI
  int err;
  MPI_Status status;

  err=MPI_File_read( *(C->mfp),C->buffer,C->frameSize,MPI_CHAR,&status);
  if (err!=MPI_SUCCESS) {
    printMPIerr(err,"trajFile_fread");
    return 0;
  }

  return 1;
#endif
  return 0;
}
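MPI_File_read can return MPI_SUCCESS while delivering fewer elements than requested (for example at end of file); the status object carries the actual count. A hedged sketch of a variant that also detects short reads with MPI_Get_count (not part of the original ptraj routine):

/* Hedged sketch: detect a short read (e.g. EOF) after MPI_File_read by
 * querying the status object. Not part of the original ptraj code. */
#include <mpi.h>

int read_frame_checked(MPI_File fh, char *buffer, int frameSize)
{
    MPI_Status status;
    int err, count;

    err = MPI_File_read(fh, buffer, frameSize, MPI_CHAR, &status);
    if (err != MPI_SUCCESS)
        return 0;                       /* hard I/O error          */

    MPI_Get_count(&status, MPI_CHAR, &count);
    if (count < frameSize)
        return 0;                       /* short read, likely EOF  */

    return 1;                           /* full frame read         */
}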
Example #14
int
main(int argc, char **argv)
{
   /* MPI stuff. */
   MPI_File fh;
   int my_rank, mpi_size;
   int data_in;
   MPI_Status status;

   /* Initialize MPI. */
   MPI_Init(&argc,&argv);
   MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
   MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
   /*MPI_Get_processor_name(mpi_name, &mpi_namelen);*/
   /*printf("mpi_name: %s size: %d rank: %d\n", mpi_name, 
     mpi_size, my_rank);*/

   if (my_rank == 0)
   {
      printf("\n*** Testing basic MPI file I/O.\n");
      printf("*** testing file create with parallel I/O with MPI...");
   }

   if (MPI_File_open(MPI_COMM_WORLD, FILE, MPI_MODE_RDWR | MPI_MODE_CREATE,
                     MPI_INFO_NULL, &fh) != MPI_SUCCESS) ERR;
   if (MPI_File_seek(fh, my_rank * sizeof(int), MPI_SEEK_SET) != MPI_SUCCESS) ERR;
   if (MPI_File_write(fh, &my_rank, 1, MPI_INT, &status) != MPI_SUCCESS) ERR;
   if (MPI_File_close(&fh) != MPI_SUCCESS) ERR;

   /* Reopen and check the file. */
   if (MPI_File_open(MPI_COMM_WORLD, FILE, MPI_MODE_RDONLY,
                     MPI_INFO_NULL, &fh) != MPI_SUCCESS) ERR;
   if (MPI_File_seek(fh, my_rank * sizeof(int), MPI_SEEK_SET) != MPI_SUCCESS) ERR;
   if (MPI_File_read(fh, &data_in, 1, MPI_INT, &status) != MPI_SUCCESS) ERR;
   if (data_in != my_rank) ERR;
   if (MPI_File_close(&fh) != MPI_SUCCESS) ERR;

   /* Shut down MPI. */
   MPI_Finalize();

   if (my_rank == 0)
   {
      SUMMARIZE_ERR;
      FINAL_RESULTS;
   }
   return 0;
}
Example #15
File: divvy.c  Project: eduffy/divvy
void load_file(char *filename, struct buffer *buf)
{
   MPI_File fh;
   MPI_Status status;
   int    size, rank;
   long   filesize, chunksize;
   int    count;
   int    rescode;

   MPI_Comm_size(MPI_COMM_WORLD, &size);
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);

   filesize  = getfilesize(filename);
   chunksize = (long) ceil((double)filesize / size);

   if(rank == 0) {
      printf("File size:    %12ld bytes\n", filesize);
      printf("Chunk size:   %12ld bytes\n", chunksize);
      printf("Num chunks:   %12d\n", size);
   }

   rescode = MPI_File_open(MPI_COMM_WORLD,
                           filename,
                           MPI_MODE_RDONLY,
                           MPI_INFO_NULL,
                           &fh);
   if(rescode == MPI_ERR_IO) {
      fprintf(stderr,
              "Process #%d cannot open %s for reading.\n",
              rank, filename);
      MPI_Abort(MPI_COMM_WORLD, 1);
   }
   buf->data = malloc(chunksize + MAX_PARTIAL_SIZE);
   MPI_File_set_view(fh,
                     rank * chunksize,
                     MPI_CHAR, MPI_CHAR,
                     "native", MPI_INFO_NULL);
   MPI_File_read(fh, buf->data, chunksize, MPI_CHAR, &status);
   MPI_Get_count(&status, MPI_CHAR, &count);
   MPI_File_close(&fh);

   buf->start = buf->data;
   buf->end   = buf->data + count;
}
Example #16
void BIL_Pio_read_raw_blocks(MPI_Comm all_readers_comm, MPI_Comm io_comm,
                             int num_blocks, BIL_Block* blocks) {
  int i;
  for (i = 0; i < num_blocks; i++) {
    MPI_File fp;
    BIL_Timing_fopen_start(all_readers_comm);
    assert(MPI_File_open(io_comm, blocks[i].file_name, MPI_MODE_RDONLY,
                         BIL->io_hints, &fp) == MPI_SUCCESS);
    BIL_Timing_fopen_stop(all_readers_comm);

    // Get variable and subarray datatype for I/O.
    MPI_Datatype var_type;
    assert(MPI_Type_contiguous(blocks[i].var_size, MPI_BYTE, &var_type)
           == MPI_SUCCESS);
    assert(MPI_Type_commit(&var_type) == MPI_SUCCESS); // added by TP
    MPI_Datatype file_type;
    assert(MPI_Type_create_subarray(blocks[i].num_dims,
                                    blocks[i].file_dim_sizes,
                                    blocks[i].sizes, blocks[i].starts,
                                    MPI_ORDER_C, var_type, 
                                    &file_type) == MPI_SUCCESS);
    assert(MPI_Type_commit(&file_type) == MPI_SUCCESS);

    assert(MPI_File_set_view(fp, BIL->io_header_size, var_type, file_type,
                             (char *)"native",
                             MPI_INFO_NULL) == MPI_SUCCESS);
    // Allocate data and read it collectively.
    blocks[i].data = BIL_Misc_malloc(blocks[i].total_size * blocks[i].var_size);
    BIL_Timing_io_start(all_readers_comm);
    assert(MPI_File_read(fp, blocks[i].data, blocks[i].total_size,
                         var_type, MPI_STATUS_IGNORE) == MPI_SUCCESS);
    BIL_Timing_io_stop(all_readers_comm,
                       blocks[i].total_size * blocks[i].var_size);

    // Clean up.
    MPI_File_close(&fp);
    MPI_Type_free(&var_type);
    MPI_Type_free(&file_type);
  }
}
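For readers new to the pattern above, here is a stripped-down version of the same idea: each rank describes its block of a global 2D array with MPI_Type_create_subarray, installs it as the file view, and then reads its block contiguously. The file name and the sizes below are illustrative only, not part of the BIL library.

/* Hedged, minimal sketch of the subarray-view pattern used above:
 * a row-major (8*nprocs) x 16 char array stored in a file is split into
 * one 8 x 16 block per rank along the row dimension. */
#include <mpi.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    MPI_Init(&argc, &argv);

    int rank, nprocs;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    int global_sizes[2] = { 8 * nprocs, 16 };       /* whole array          */
    int sub_sizes[2]    = { 8, 16 };                /* this rank's block    */
    int starts[2]       = { 8 * rank, 0 };          /* block's upper corner */

    MPI_Datatype file_type;
    MPI_Type_create_subarray(2, global_sizes, sub_sizes, starts,
                             MPI_ORDER_C, MPI_BYTE, &file_type);
    MPI_Type_commit(&file_type);

    MPI_File fh;
    MPI_File_open(MPI_COMM_WORLD, "global_array.dat", MPI_MODE_RDONLY,
                  MPI_INFO_NULL, &fh);
    /* The view hides everything outside this rank's block. */
    MPI_File_set_view(fh, 0, MPI_BYTE, file_type, "native", MPI_INFO_NULL);

    char *block = (char *)malloc(8 * 16);
    MPI_File_read_all(fh, block, 8 * 16, MPI_BYTE, MPI_STATUS_IGNORE);

    MPI_File_close(&fh);
    MPI_Type_free(&file_type);
    free(block);
    MPI_Finalize();
    return 0;
}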
Example #17
/*
 * parallel_fgets()
 * Like fgets, use mpi file routines to get all chars up to and including 
 * null or newline. Returns buffer, or NULL on error. 
 */
char *parallel_fgets(char *buffer, int num, coordinateInfo *C) {
#ifdef MPI
  int i,err;

  for (i=0; i<num-1; i++) {
    err=MPI_File_read(*(C->mfp),buffer+i,1,MPI_CHAR,MPI_STATUS_IGNORE);
    if (err!=MPI_SUCCESS) {
      printMPIerr(err,"parallel_fgets");
      return NULL;
    }
    if (buffer[i]=='\n' || buffer[i]=='\0') {i++; break;} // Always have i be 1 more char ahead   
  }

  if (i==num && buffer[i-1]=='\n')
    buffer[i-1]='\0';
  else
    buffer[i]='\0';

  return buffer;
#endif
  return NULL;
}
Example #18
int main(int argc, char *argv[]) 
{ 
  int size, rank, buf[2]; 
  MPI_File fh; 
  MPI_Status st;

  MPI_Init(&argc, &argv);
  MPI_Comm_size(MPI_COMM_WORLD, &size);
  MPI_Comm_rank(MPI_COMM_WORLD, &rank);

  // open file for read
  MPI_File_open(MPI_COMM_WORLD, argv[1], MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
  // set starting offset for the read operation
  MPI_File_set_view(fh, rank*8, MPI_INT, MPI_INT, "native", MPI_INFO_NULL);
  // read two integers from the file
  MPI_File_read(fh, buf, 2, MPI_INT, &st);
  MPI_File_close(&fh);

  // print out the two integers
  printf("rank=%d: buf=[%d,%d]\n", rank, buf[0], buf[1]);
  MPI_Finalize();
  return 0;
}
Example #19
	std::streamsize readwrite(char* s, std::streamsize n) {
		std::streamsize count = 0;

		while (n > 0) {
			int io_chunk_size = std::numeric_limits< int >::max();
			if (io_chunk_size > n)
				io_chunk_size = n;

			MPI_Status status;
			int error = dowrite ?
					MPI_File_write(this->mpifile, (void*)s, io_chunk_size, MPI_BYTE, &status) :
					MPI_File_read(this->mpifile, (void*)s, io_chunk_size, MPI_BYTE, &status);
			if (error != MPI_SUCCESS)
				break;

			int actual_io;
			MPI_Get_count(&status, MPI_BYTE, &actual_io);

			n -= actual_io;
			count += actual_io;
			s += actual_io;

			if (dowrite)
				++this->writereqs;
			else
				++this->readreqs;

			if (actual_io < io_chunk_size) // EOF
				break;
		}

		if (dowrite)
			this->byteswritten += count;
		else
			this->bytesread += count;
		return count;
	}
Example #20
void mpi_file_read_f(MPI_Fint *fh, char *buf, MPI_Fint *count,
		     MPI_Fint *datatype, MPI_Fint *status, MPI_Fint *ierr)
{
   MPI_File c_fh = MPI_File_f2c(*fh);
   MPI_Datatype c_type = MPI_Type_f2c(*datatype);
   MPI_Status *c_status;
#if OMPI_SIZEOF_FORTRAN_INTEGER != SIZEOF_INT
   MPI_Status c_status2;
#endif

   /* See if we got MPI_STATUS_IGNORE */
   if (OMPI_IS_FORTRAN_STATUS_IGNORE(status)) {
      c_status = MPI_STATUS_IGNORE;
   } else {
      /* If sizeof(int) == sizeof(INTEGER), then there's no
         translation necessary -- let the underlying functions write
         directly into the Fortran status */

#if OMPI_SIZEOF_FORTRAN_INTEGER == SIZEOF_INT
      c_status = (MPI_Status *) status;
#else
      c_status = &c_status2;
#endif
   }
    
   *ierr = OMPI_INT_2_FINT(MPI_File_read(c_fh, OMPI_F2C_BOTTOM(buf),
                                         OMPI_FINT_2_INT(*count),
                                         c_type, c_status));

#if OMPI_SIZEOF_FORTRAN_INTEGER != SIZEOF_INT
   if (MPI_SUCCESS == OMPI_FINT_2_INT(*ierr) && 
       MPI_STATUS_IGNORE != c_status) {
      MPI_Status_c2f(c_status, status);
   }
#endif
}
Example #21
void
PullInMPI_IOSymbols()
{
#ifdef PARALLEL

    //Don't call this!
    EXCEPTION1(ImproperUseException, "Do not call PullInMPI_IOSymbols");

    MPI_Info info;
    MPI_File fh;
    MPI_Offset sz;
    char *nm;
    int whence;
    void *buf;
    int count;
    MPI_Datatype datatype;
    MPI_Status status;

    MPI_File_open(VISIT_MPI_COMM, nm, 0, info, &fh);
    MPI_File_get_size(fh, &sz);
    MPI_File_seek(fh, sz, whence);
    MPI_File_read(fh, buf, count, datatype, &status);
#endif
}
Example #22
/*----< main() >------------------------------------------------------------*/
int main(int argc, char **argv)
{
    int i, j, err, rank, np, num_io;
    char *buf, *filename;
    int rank_dim[2], array_of_sizes[2];
    int array_of_subsizes[2];
    int count, *blocklengths, global_array_size;
    MPI_Count ftype_size;
    MPI_Aint *displacements;
    MPI_File fh;
    MPI_Datatype ftype;
    MPI_Request *request;
    MPI_Status *statuses;
    MPI_Status status;
    MPI_Offset offset = 0;
    int nr_errors = 0;
#ifdef VERBOSE
    int k;
#endif

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &np);

    if (np != 4) {
        if (!rank)
            printf("Please run with 4 processes. Exiting ...\n\n");
        MPI_Finalize();
        return 1;
    }

    filename = (argc > 1) ? argv[1] : "testfile";

    num_io = 2;

    request = (MPI_Request *) malloc(num_io * sizeof(MPI_Request));
    statuses = (MPI_Status *) malloc(num_io * sizeof(MPI_Status));

    /*-----------------------------------------------------------------------*/
    /* process rank in each dimension */
    rank_dim[0] = rank / 2;
    rank_dim[1] = rank % 2;

    /* global 2D array size */
    array_of_sizes[0] = YLEN * 2;
    array_of_sizes[1] = XLEN * 2;

    global_array_size = array_of_sizes[0] * array_of_sizes[1];

    array_of_subsizes[0] = YLEN / 2;
    array_of_subsizes[1] = XLEN * SUB_XLEN / 5;

    offset = rank_dim[0] * YLEN * array_of_sizes[1] + rank_dim[1] * XLEN;

    /* define data type for file view */
    count = array_of_subsizes[0] * 2;   /* 2 is the no. blocks along X */
    blocklengths = (int *) malloc(count * sizeof(int));
    displacements = (MPI_Aint *) malloc(count * sizeof(MPI_Aint));
    for (i = 0; i < count; i++)
        blocklengths[i] = array_of_subsizes[1] / 2;
    for (i = 0; i < array_of_subsizes[0]; i++)
        for (j = 0; j < 2; j++)
            displacements[i * 2 + j] = offset + i * 2 * array_of_sizes[1]
                + j * XLEN / 2;
    MPI_Type_create_hindexed(count, blocklengths, displacements, MPI_CHAR, &ftype);
    MPI_Type_commit(&ftype);
    MPI_Type_size_x(ftype, &ftype_size);

/* subarray's layout in the global array

   P0's 's layout                               P1's layout
   [ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9] | [ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9]
[ 0] 0 1 2     3 4 5                          |                       D E F     G H I
[ 1]                                          |
[ 2] 6 7 8     9 : ;                          |                       J K L     M N O
[ 3]                                          |
[ 4]                                          |
[ 5]                                          |
[ 6]                                          |
[ 7]                                          |
[ 8]                                          |
[ 9]                                          |

   P2's 's layout                               P3's layout
   [ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9] | [ 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9]
[ 0]                                          |
[ 1]                                          |
[ 2]                                          |
[ 3]                                          |
[ 4]                                          |
[ 5] X Y Z     [ \ ]                          |                       l m n     o p q
[ 6]                                          |
[ 7] ^ _ `     a b c                          |                       r s t     u v w
[ 8]                                          |
[ 9]                                          |
*/

    /* initialize the write buffer */
    buf = (char *) malloc(array_of_subsizes[0] * array_of_subsizes[1]);
    for (i = 0; i < array_of_subsizes[0] * array_of_subsizes[1]; i++)
        buf[i] = '0' + rank * 20 + i % 79;

    /* zero file contents --------------------------------------------------- */
    if (rank == 0) {
        char *wr_buf = (char *) calloc(num_io * global_array_size, 1);
        MPI_File_open(MPI_COMM_SELF, filename,
                      MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
        MPI_File_write(fh, wr_buf, num_io * global_array_size, MPI_CHAR, &status);
        MPI_File_close(&fh);
        free(wr_buf);
    }
    /* open the file -------------------------------------------------------- */
    err = MPI_File_open(MPI_COMM_WORLD, filename,
                        MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);
    if (err != MPI_SUCCESS) {
        printf("Error: MPI_File_open() filename %s\n", filename);
        MPI_Abort(MPI_COMM_WORLD, -1);
        exit(1);
    }

    /* MPI nonblocking collective write */
    for (i = 0; i < num_io; i++) {
        offset = i * global_array_size;
        /* set the file view */
        MPI_File_set_view(fh, offset, MPI_BYTE, ftype, "native", MPI_INFO_NULL);
        MPI_File_iwrite_all(fh, buf, ftype_size, MPI_CHAR, &request[i]);
    }
    MPI_Waitall(num_io, request, statuses);
    MPI_File_close(&fh);

    /* read and print file contents ----------------------------------------- */
    if (rank == 0) {
        char *ptr;
        char *rd_buf = (char *) calloc(num_io * global_array_size, 1);
        MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
        MPI_File_read(fh, rd_buf, num_io * global_array_size, MPI_CHAR, &status);
        MPI_File_close(&fh);

#ifdef VERBOSE
        printf("-------------------------------------------------------\n");
        printf("   [");
        for (i = 0; i < 2; i++) {
            for (j = 0; j < XLEN; j++)
                printf(" %d", j);
            printf(" ");
        }
        printf("]\n\n");


        ptr = rd_buf;
        for (k = 0; k < num_io; k++) {
            for (i = 0; i < 2 * YLEN; i++) {
                printf("[%2d]", k * 2 * YLEN + i);
                for (j = 0; j < 2 * XLEN; j++) {
                    if (j > 0 && j % XLEN == 0)
                        printf(" ");
                    if (*ptr != 0)
                        printf(" %c", *ptr);
                    else
                        printf("  ");
                    ptr++;
                }
                printf("\n");
            }
            printf("\n");
        }
#endif
        ptr = rd_buf;
        for (i = 0; i < 2 * YLEN * num_io; i++) {
            for (j = 0; j < 2 * XLEN; j++) {
                if (*ptr != compare_buf[i][j]) {
                    fprintf(stderr, "expected %d got %d at [%d][%d]\n",
                            *ptr, compare_buf[i][j], i, j);
                    nr_errors++;
                }
                ptr++;
            }
        }
        free(rd_buf);

        if (nr_errors == 0)
            fprintf(stdout, " No Errors\n");
        else
            fprintf(stderr, "Found %d errors\n", nr_errors);
    }

    free(blocklengths);
    free(displacements);
    free(buf);
    free(request);
    free(statuses);
    MPI_Type_free(&ftype);
    MPI_Finalize();
    return 0;
}
Example #23
int main(int argc, char **argv)
{
    MPI_File fh;
    MPI_Status status;
    MPI_Offset size;
    long long *buf, i;
    char *filename;
    int j, mynod, nprocs, len, flag, err;

    MPI_Init(&argc,&argv);

    MPI_Comm_rank(MPI_COMM_WORLD, &mynod);
    MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

    if (nprocs != 1) {
	fprintf(stderr, "Run this program on one process only\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
    }

    i = 1;
    while ((i < argc) && strcmp("-fname", *argv)) {
	i++;
	argv++;
    }
    if (i >= argc) {
	fprintf(stderr, "\n*#  Usage: large -fname filename\n\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
    }
    argv++;
    len = strlen(*argv);
    filename = (char *) malloc(len+1);
    strcpy(filename, *argv);
    fprintf(stderr, "This program creates an 4 Gbyte file. Don't run it if you don't have that much disk space!\n");

    buf = (long long *) malloc(SIZE * sizeof(long long));
    if (!buf) {
	fprintf(stderr, "not enough memory to allocate buffer\n");
	MPI_Abort(MPI_COMM_WORLD, 1);
    }

    MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
                  MPI_INFO_NULL, &fh);

    for (i=0; i<NTIMES; i++) {
	for (j=0; j<SIZE; j++)
	    buf[j] = i*SIZE + j;
	
	err = MPI_File_write(fh, buf, SIZE, MPI_DOUBLE, &status);
        /* MPI_DOUBLE because not all MPI implementations define
           MPI_LONG_LONG_INT, even though the C compiler supports long long. */
        if (err != MPI_SUCCESS) {
	    fprintf(stderr, "MPI_File_write returned error\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
	}
    }

    MPI_File_get_size(fh, &size);
    fprintf(stderr, "file size = %lld bytes\n", size);

    MPI_File_seek(fh, 0, MPI_SEEK_SET);

    for (j=0; j<SIZE; j++) buf[j] = -1;

    flag = 0;
    for (i=0; i<NTIMES; i++) {
	err = MPI_File_read(fh, buf, SIZE, MPI_DOUBLE, &status);
        /* MPI_DOUBLE because not all MPI implementations define
           MPI_LONG_LONG_INT, even though the C compiler supports long long. */
        if (err != MPI_SUCCESS) {
	    fprintf(stderr, "MPI_File_write returned error\n");
	    MPI_Abort(MPI_COMM_WORLD, 1);
	}
	for (j=0; j<SIZE; j++) 
	    if (buf[j] != i*SIZE + j) {
		fprintf(stderr, "error: buf %d is %lld, should be %lld \n", j, buf[j], 
                                 i*SIZE + j);
		flag = 1;
	    }
    }

    if (!flag) fprintf(stderr, "Data read back is correct\n");
    MPI_File_close(&fh);

    free(buf);
    free(filename);
    MPI_Finalize(); 
    return 0;
}
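SIZE and NTIMES are macros defined elsewhere in the original test. Each iteration writes SIZE elements through the 8-byte MPI_DOUBLE type, so SIZE * NTIMES * 8 must add up to roughly 4 GB. One hedged pair of definitions consistent with that (assumed values, not the originals):

/* Assumed definitions, not the originals: with these values the program
 * writes NTIMES * SIZE * 8 bytes = 128 * 4 Mi * 8 B = 4 GiB. */
#define SIZE   (4 * 1024 * 1024)   /* long longs per write  */
#define NTIMES 128                 /* number of write calls */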
Example #24
int main( int argc, char *argv[] )
{
    unsigned int itr;

    int operacao;
    int verbose;
    int juntar;
    char * chave_file;
    char * entrada_file;
    char * saida_file;

    octeto Nb,Nk,Nr;
    octeto bloco[4*8];
    octeto chave[4*8*15];

    int worldsize, rank;
    MPI_Status status;
    MPI_File chave_handle;
    MPI_File entrada_handle;
    MPI_File saida_handle;

    MPI_Offset entrada_bytes;
    unsigned int numero_blocos;
    unsigned int blocos_processo;
    MPI_Offset bloco_byte_inicio;
    MPI_Offset bloco_byte_fim;
    MPI_Offset iterador;

    Tabela * tabela;
    octeto * tabelaEmpacotada;
    unsigned int proc;
    unsigned int tamanho_tabela;
    Tabela * tabela2;
    unsigned int no_proc;
    unsigned int no_resto;
    unsigned int i;
    BTreeNode * node;
    Indice * indice;


    MPI_Init(&argc,&argv);

    MPI_Comm_size(MPI_COMM_WORLD,&worldsize);
    MPI_Comm_rank(MPI_COMM_WORLD,&rank);

    operacao = INDEFINIDA;
    verbose = 0;
    juntar = 0;
    chave_file = NULL;
    entrada_file = NULL;
    saida_file = NULL;
    for(itr = 1;itr < (unsigned int)argc;itr++)
    {
/* Usage instructions */
        if( strcmp(argv[itr],"-a") == 0 || strcmp(argv[itr],"--ajuda") == 0 || 
            strcmp(argv[itr],"-h") == 0 || strcmp(argv[itr],"--help") == 0 )
        {
            if(rank == 0)
            {
                printf(" Uso: mpiexec -n [PROCESSOS] ./sm-rijndael [ARGUMENTO VALOR].\n");
                printf(" Encripta/Decripta um arquivo usando o algoritmo Rijndael(AES) extendido,\n");
                printf(" realizando um pre-processamento de blocos repetidos.\n");
                printf("  Argumentos opcionais:\n");
                printf("   -v,--verbose: Exibe mensagens de conclusao da operacao.\n");
                printf("   -j,--juntar: Concatena as tabelas de cada processo em um mestre.\n");
                printf("  Argumentos obrigatorios:\n");
                printf("   -op,--operacao: Informa se o objetivo da execucao eh encriptar ou decriptar.\n");
                printf("                    * Os valores possiveis sao: \'encriptar\' e \'decriptar\'.\n");
                printf("   -e,-i,--entrada,--input: Caminho e nome do arquivo a ser criptografado.\n");
                printf("   -s,-o,--saida,--output: Caminho e nome do arquivo resultante do processo de criptografia da entrada.\n");
                printf("   -c,-k,--chave,--key: Caminho e nome do arquivo contendo a chave.\n");
                printf("  O arquivo contendo a chave eh em formato binario de acordo com a seguinte especificacao:\n");
                printf("   - O primeiro byte deve conter o tamanho do bloco (em palavras de 4 bytes).\n");
                printf("      * O bloco pode possuir tamanho: 4, 5, 6, 7 ou 8.\n");
                printf("   - O segundo byte deve conter o tamanho da chave (em palavras de 4 bytes).\n");
                printf("      * Esta aplicacao aceita chaves com tamanho: 4, 5, 6, 7 ou 8.\n");
                printf("   - Os proximos 4*[tamanho da chave] bytes do arquivo sao os bytes componentes da chave, que\n");
                printf("     devem estar (obrigatoriamente) escritos no formato hexadecimal da linguagem C (0xff).\n");
                printf("   * Eh recomendavel o uso de um editor hexadecimal na construcao do arquivo chave.\n");
            }
            goto finalizando;
        }

/* Juntar: concatenates each process's table into a master one */
        else
        if( strcmp(argv[itr],"-j") == 0 || strcmp(argv[itr],"--juntar") == 0)
        {
            juntar = 1;
        }

/* Verbose: print completion messages */
        else
        if( strcmp(argv[itr],"-v") == 0 || strcmp(argv[itr],"--verbose") == 0)
        {
            verbose = 1;
        }

/* Operation to be performed */
        else
        if( strcmp(argv[itr],"-op") == 0 || strcmp(argv[itr],"--operacao") == 0 )
        {
            if( itr+1 < argc )
            {
                if( strcmp(argv[itr+1],"encriptar") == 0 )
                {
                    operacao = ENCRIPTAR;
                }
                else
                if( strcmp(argv[itr+1],"decriptar") == 0 )
                {
                    operacao = DECRIPTAR;
                }
                itr++;
            }
            else
            {
                goto sempar;
            }
        }

/* File containing the key */
        else
        if( strcmp(argv[itr],"-c") == 0 || strcmp(argv[itr],"--chave") == 0 || 
            strcmp(argv[itr],"-k") == 0 || strcmp(argv[itr],"--key") == 0 )
        {
            if(itr+1 < argc)
            {
                chave_file = argv[itr+1];
                itr++;
            }
            else
            {
                goto sempar;
            }
        }

/* Input file */
        else
        if( strcmp(argv[itr],"-e") == 0 || strcmp(argv[itr],"--entrada") == 0 || 
            strcmp(argv[itr],"-i") == 0 || strcmp(argv[itr],"--input") == 0 )
        {
            if(itr+1 < argc)
            {
                entrada_file = argv[itr+1];
                itr++;
            }
            else
            {
                goto sempar;
            }
        }

/* Output file */
        else 
        if( strcmp(argv[itr],"-s") == 0 || strcmp(argv[itr],"--saida") == 0 || 
            strcmp(argv[itr],"-o") == 0 || strcmp(argv[itr],"--output") == 0 )
        {
            if(itr+1 < argc)
            {
                saida_file = argv[itr+1];
                itr++;
            }
            else
            {
                goto sempar;
            }
        }
/* Unknown error */
        else
        {
            if(rank == 0)
            {
                printf("Erro nos argumentos passados.\n");
            }
            goto help;
        }
    }
/* End of argument reading */

    if( operacao == INDEFINIDA || chave_file == NULL || entrada_file == NULL || saida_file == NULL )
    {
        if(rank == 0)
        {
            if( operacao == INDEFINIDA )
                printf("A operacao a ser realizada eh invalida ou nao foi especificada.\n");
            if( chave_file == NULL )
                printf("Esta faltando especificar o arquivo com a chave.\n");
            if( entrada_file == NULL )
                printf("Esta faltando especificar o arquivo de entrada.\n");
            if( saida_file == NULL )
                printf("Esta faltando especificar o arquivo de saida.\n");
        }
        goto help;
    }
/* End of argument handling */

    if( MPI_File_open( MPI_COMM_WORLD, chave_file, MPI_MODE_RDONLY, MPI_INFO_NULL, &chave_handle ) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na abertura do arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }

    if( MPI_File_read(chave_handle,&Nb,1, MPI_BYTE,&status) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na leitura do tamanho de um bloco no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }
    if( Nb< 4 || Nb > 8 )
    {
        if( rank == 0 )
        {
            printf("Tamanho de bloco invalido no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }

    if( MPI_File_read(chave_handle,&Nk,1, MPI_BYTE,&status) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na leitura do tamanho da chave no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }
    if( Nk< 4 || Nk > 8 )
    {
        if( rank == 0 )
        {
            printf("Tamanho de chave invalido no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }

    if( MPI_File_read(chave_handle,chave,4*Nk,MPI_BYTE,&status) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na leitura da chave no arquivo com a chave (%s).\n",chave_file);
        }
        goto help;
    }

    MPI_File_close( &chave_handle );
    Nr = numero_rodadas(Nb,Nk);
    KeyExpansion(chave,Nb,Nk);

    if( MPI_File_open( MPI_COMM_WORLD, entrada_file, 
            MPI_MODE_RDONLY, 
            MPI_INFO_NULL, &entrada_handle ) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na abertura do arquivo de entrada (%s).\n",entrada_file);
        }
        goto help;
    }

    MPI_File_get_size(entrada_handle,&entrada_bytes);


    if( MPI_File_open( MPI_COMM_WORLD, saida_file, 
            MPI_MODE_RDWR | MPI_MODE_CREATE | MPI_MODE_EXCL, 
            MPI_INFO_NULL, &saida_handle ) != MPI_SUCCESS )
    {
        if( rank == 0 )
        {
            printf("Erro na criacao do arquivo de saida (%s).\n",saida_file);
            printf("Uma possivel causa eh que o arquivo ja exista.\n");
        }
        goto help;
    }

    numero_blocos = ( entrada_bytes / (Nb*4) );
    blocos_processo = numero_blocos / worldsize;

    if( operacao == ENCRIPTAR || operacao == DECRIPTAR )
    {
        bloco_byte_inicio = 4*Nb*blocos_processo*rank;
        bloco_byte_fim = 4*Nb*blocos_processo*(rank+1);

        tabela = novaTabela(Nb*4);

        for( iterador = bloco_byte_inicio ; iterador < bloco_byte_fim ; iterador += (4*Nb) )
        {
            if( MPI_File_read_at(entrada_handle,iterador,bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao ler do arquivo de entrada (%s).\n",entrada_file);
                }
                goto help;
            }

            novaOcorrenciaTabela(tabela,bloco,iterador);
        }
        
        iterador = 4*Nb*blocos_processo*worldsize + 4*Nb*rank;
        if( iterador < numero_blocos*4*Nb )
        {
            if( MPI_File_read_at(entrada_handle,iterador,bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao ler do arquivo de entrada (%s).\n",entrada_file);
                }
                goto help;
            }

            novaOcorrenciaTabela(tabela,bloco,iterador);
        }
        else if( operacao == ENCRIPTAR  &&  iterador == numero_blocos*4*Nb )
        {
            if( MPI_File_read_at(entrada_handle,iterador,bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao ler do arquivo de entrada (%s).\n",entrada_file);
                }
                goto help;
            }
            bloco[ 4*Nb - 1 ] = (octeto)(entrada_bytes - numero_blocos*4*Nb);
            novaOcorrenciaTabela(tabela,bloco,iterador);
        }


        if( juntar == 1 )
        {
            tabelaEmpacotada = (octeto*)malloc( entrada_bytes );
            if( rank == 0 ) /* Master that will concatenate all the trees */
            {
                for(proc=1;proc<worldsize;proc++)
                {
                    MPI_Recv( tabelaEmpacotada, entrada_bytes, MPI_BYTE, MPI_ANY_SOURCE, TAG_TABELA_EMPACOTADA, MPI_COMM_WORLD, &status );
                    desempacotarInserindo(tabelaEmpacotada,tabela);
                }
                
                tamanho_tabela = numeroBlocosTabela(tabela);

                no_proc = (tamanho_tabela / worldsize);
                no_resto = (tamanho_tabela % worldsize);
                
                tabela2 = novaTabela(Nb*4);
                for(proc=1;proc<worldsize;proc++)
                {
                    for(i=0;i<no_proc;i++)
                    {
                        soInsiraTabela(tabela2, popLastTabelaNode(tabela) );
                    }
                    if( no_resto > 1 )
                    {
                        soInsiraTabela(tabela2, popLastTabelaNode(tabela) );
                        no_resto--;
                    }
                    empacotarTabela(tabela2,tabelaEmpacotada);

                    MPI_Send(tabelaEmpacotada,numeroBytesTabela(tabela2), MPI_BYTE, proc, TAG_TABELA_EMPACOTADA_2, MPI_COMM_WORLD );

                    destruirArvore(tabela2->root);
                    tabela2->root = NULL;
                }
                destruirTabela(tabela2);
            }
            else
            {
                empacotarTabela(tabela,tabelaEmpacotada);
                MPI_Send(tabelaEmpacotada,numeroBytesTabela(tabela), MPI_BYTE, 0, TAG_TABELA_EMPACOTADA, MPI_COMM_WORLD );
                destruirArvore(tabela->root);
                tabela->root = NULL;

                MPI_Recv( tabelaEmpacotada, entrada_bytes, MPI_BYTE, 0, TAG_TABELA_EMPACOTADA_2, MPI_COMM_WORLD, &status );
                desempacotarInserindo(tabelaEmpacotada,tabela);
            }
            free(tabelaEmpacotada);
        }

        if( operacao == ENCRIPTAR )
            MPI_File_set_size(saida_handle,(MPI_Offset)( (numero_blocos+1)*(Nb*4) ) );
        else if( operacao == DECRIPTAR )
            MPI_File_set_size(saida_handle,entrada_bytes);

        tamanho_tabela = numeroBlocosTabela(tabela);
        for( i=0 ; i<tamanho_tabela ; i++ )
        {
            node = popLastTabelaNode(tabela);
//          memcpy (bloco,node->bloco,4*Nb);

            if( operacao == ENCRIPTAR )
                AES_encriptar_bloco(node->bloco,Nb,chave,Nr);
            else if( operacao == DECRIPTAR )
                AES_decriptar_bloco(node->bloco,Nb,chave,Nr);

            indice = node->ocorrencias;
            while( indice != NULL )
            {
                if( MPI_File_write_at(saida_handle,indice->indice,node->bloco,(4*Nb),MPI_BYTE,&status) != MPI_SUCCESS )
                {
                    if( rank == 0 )
                    {
                        printf("Erro ao escrever no arquivo de saida (%s).\n",saida_file);
                    }
                    goto help;
                }
                indice = indice->next;
            }
            destruirArvore(node);
        }
        destruirTabela(tabela);

        if( operacao == DECRIPTAR )
        {
            MPI_Barrier( MPI_COMM_WORLD ); /* Barrier that keeps anyone from reading before the decrypted value is written */

            if( MPI_File_read_at(saida_handle,entrada_bytes-1,bloco,1,MPI_BYTE,&status) != MPI_SUCCESS )
            {
                if( rank == 0 )
                {
                    printf("Erro ao realizar leitura no arquivo de saida (%s).\n",saida_file);
                }
                goto help;
            }

            MPI_Barrier( MPI_COMM_WORLD ); /* Barrier that keeps any process from truncating the file before another process reads */

            MPI_File_set_size(saida_handle,entrada_bytes - 4*Nb + bloco[0]);
        }

        if( rank == 0 && verbose==1)
        {
            if( operacao == ENCRIPTAR )
                printf("A encriptacao do arquivo foi realizada com sucesso.\n");
            else if( operacao == DECRIPTAR )
                printf("A decriptacao do arquivo foi realizada com sucesso.\n");
        }
    }

    goto finalizando;

sempar:
    if( rank == 0 )
    {
        printf("Sem par correspondente para a opcao %s.\n",argv[itr]);
    }

help:
    if( rank == 0 )
    {
        printf("Use a opcao --help para melhor entendimento do uso da aplicacao.\n");
    }

finalizando:
    MPI_Finalize( );
    return 0;
}
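The code above reads the key file as one byte for the block size Nb, one byte for the key size Nk (both in 4-byte words), and then 4*Nk raw key bytes. A hedged sketch of a small stand-alone program that writes such a file follows; the output name and the FIPS-197 example key are illustrative, not part of the original project.

/* Hedged sketch (not part of the original program): produce a key file in
 * the layout the code above reads back: one byte with the block size Nb,
 * one byte with the key size Nk (both in 4-byte words), then 4*Nk key bytes. */
#include <stdio.h>

int main(void)
{
    unsigned char Nb = 4;                 /* 128-bit blocks */
    unsigned char Nk = 4;                 /* 128-bit key    */
    unsigned char key[4 * 4] = {
        0x2b, 0x7e, 0x15, 0x16, 0x28, 0xae, 0xd2, 0xa6,
        0xab, 0xf7, 0x15, 0x88, 0x09, 0xcf, 0x4f, 0x3c
    };

    FILE *f = fopen("chave.bin", "wb");
    if (!f) { perror("fopen"); return 1; }

    fputc(Nb, f);
    fputc(Nk, f);
    fwrite(key, 1, sizeof key, f);
    fclose(f);
    return 0;
}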
Example #25
File: iotest.c  Project: RWTH-OS/MP-MPICH
int main(int argc, char **argv)
{
    int *buf, i, rank, nints, flag;
	size_t len;
    char *filename, *tmp;
    MPI_File fh;
    MPI_Status status;

    MPI_Init(&argc,&argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

/* process 0 takes the file name as a command-line argument and 
   broadcasts it to other processes */
    if (!rank) {
	i = 1;
	while ((i < argc) && strcmp("-fname", *argv)) {
	    i++;
	    argv++;
	}
	if (i >= argc) {
	    printf("\n*#  Usage: %s -fname filename\n\n", argv[0]);
	    MPI_Abort(MPI_COMM_WORLD, 1);
	}
	argv++;
	len = strlen(*argv);
	filename = (char *) malloc(len+10);
	strcpy(filename, *argv);
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	MPI_Bcast(filename, (int)len+10, MPI_CHAR, 0, MPI_COMM_WORLD);
    }
    else {
	MPI_Bcast(&len, 1, MPI_INT, 0, MPI_COMM_WORLD);
	filename = (char *) malloc(len+10);
	MPI_Bcast(filename, (int)len+10, MPI_CHAR, 0, MPI_COMM_WORLD);
    }
    

    buf = (int *) malloc(SIZE);
    nints = SIZE/sizeof(int);
    for (i=0; i<nints; i++) buf[i] = rank*100000 + i;

    /* each process opens a separate file called filename.'myrank' */
    tmp = (char *) malloc(len+10);
    strcpy(tmp, filename);
    sprintf(filename, "%s.%d", tmp, rank);

    MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_CREATE | MPI_MODE_RDWR,
		   MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_INT, MPI_INT, "native", MPI_INFO_NULL);
    MPI_File_write(fh, buf, nints, MPI_INT, &status);
    MPI_File_close(&fh);

    /* reopen the file and read the data back */

    for (i=0; i<nints; i++) buf[i] = 0;
    MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_CREATE | MPI_MODE_RDWR, 
                  MPI_INFO_NULL, &fh);
    MPI_File_set_view(fh, 0, MPI_INT, MPI_INT, "native", MPI_INFO_NULL);
    MPI_File_read(fh, buf, nints, MPI_INT, &status);
    MPI_File_close(&fh);

    /* check if the data read is correct */
    flag = 0;
    for (i=0; i<nints; i++) 
	if (buf[i] != (rank*100000 + i)) {
	    printf("Process %d: error, read %d, should be %d\n", rank, buf[i], rank*100000+i);
	    flag = 1;
	}

    if (!flag) printf("Process %d: data read back is correct\n", rank);

    free(buf);
    free(filename);
    free(tmp);

    MPI_Finalize(); 
    return 0;
}
Example #26
int main(int argc, char *argv[])
{
    int rank, size;
    const int N = atoi(argv[1]);

//    printf("Number of testcase = %d\n", N);

    MPI_Init (&argc, &argv);

    double start_time, end_time;
    MPI_Comm_rank (MPI_COMM_WORLD, &rank);
    MPI_Comm_size (MPI_COMM_WORLD, &size);
 //   printf("My rank is %d \n", rank); 
    
    //start_time = MPI_Wtime();

    MPI_File fin, fout;
    MPI_Status status;
    int *root_arr;
    int max_arr_size = size > N ? size : N;
    int ret = MPI_File_open(MPI_COMM_WORLD, argv[2], 
                MPI_MODE_RDONLY, MPI_INFO_NULL, &fin);
    
    if (rank == ROOT) {
        root_arr = new int[max_arr_size+3];
//        printf("Enter rank 0 statement ... \n");
        MPI_File_read(fin, root_arr, N, MPI_INT, &status);
        
/*        for (int i = 0; i < N; ++i)
             printf("[START] [Rank %d] root_arr[%d] = %d\n", rank, i, root_arr[i]); 
        printf("Out Rank 0 statement ... \n");
*/    } 
    MPI_File_close(&fin);
    
    MPI_Barrier(MPI_COMM_WORLD); // Wait for rank0 to read file 
    
    int rank_num = size > N ? N : size;
    const int LAST = rank_num - 1;
    int num_per_node = N / rank_num;
    int *local_arr;
    int num_per_node_diff = N - num_per_node * rank_num;
    int diff = num_per_node_diff;
    bool has_remain = false;
    bool has_remain_rank = rank_num % 2 ? true : false;
    
    if (num_per_node_diff > 0) {
        // Send remaining elements to size - 1
        has_remain = true;
        if (rank == ROOT) {
            MPI_Send(root_arr + N - diff, diff, MPI_INT, LAST, 0, MPI_COMM_WORLD); 
        } else if (rank == LAST) {
            // Handle special case
            num_per_node += num_per_node_diff;
            local_arr = new int[num_per_node+1];
            MPI_Recv(local_arr + num_per_node - diff, diff, 
                    MPI_INT, 0, MPI_ANY_TAG, MPI_COMM_WORLD, &status);
        }
    } else if(rank == rank_num - 1) {
        local_arr = new int[num_per_node+1];
    }

    MPI_Barrier(MPI_COMM_WORLD); // Wait for rank0 to read file 
    if (rank != rank_num - 1)
        local_arr = new int[num_per_node+1];
	

    // MPI_Scatter (send_buf, send_count, send_type, recv_buf, recv_count, recv_type, root, comm)
	if (rank < LAST)
        MPI_Scatter(root_arr, num_per_node, MPI_INT, local_arr, 
                    num_per_node, MPI_INT, ROOT, MPI_COMM_WORLD);
    else
        MPI_Scatter(root_arr, num_per_node-diff, MPI_INT, local_arr, 
                    num_per_node-diff, MPI_INT, ROOT, MPI_COMM_WORLD);
    
   // printf("[Rank %d] num_per_node_size = %d\n" ,rank, num_per_node); 
    MPI_Barrier(MPI_COMM_WORLD);
/*
    for (int i = 0; i < num_per_node; ++i)
        printf("[BEFORE] [Rank %d] local_arr[%d] = %d\n", rank, i, local_arr[i]); 
*/
if (rank < rank_num) {
    int round = N % 2 ? N+1 : N;
    for (int i = 0; i < round; ++i) {
        // bool need_send = (i & 1)^(num_per_node & 1);
         bool need_send = true;
        for (int j = i & 1; j < num_per_node; j+=2) {
            if (j+1 < num_per_node) {
                if (local_arr[j] > local_arr[j+1]) 
                    swap(local_arr[j], local_arr[j+1]);        
            } else if (j-1 >= 0) {
                if (local_arr[j-1] > local_arr[j]) 
                    swap(local_arr[j-1], local_arr[j]);
            }            
        }
        
        int element;
        bool recv_side;
        if (i & 1) {
            if (rank & 1)   recv_side = true;
            else    recv_side = false;
        } else {
            if (rank & 1)   recv_side = false;
            else    recv_side = true;
        }
        // if (recv_side) printf("i = %d, rank = %d, recv\n", i, rank);
        // if (!recv_side) printf("i = %d, rank = %d, send\n", i, rank);

        if (recv_side) {
            if (rank != ROOT) {
                /* Receive element */
                MPI_Recv(&element, 1, MPI_INT, rank - 1, 0, MPI_COMM_WORLD, &status);  
                MPI_Send(local_arr, 1, MPI_INT, rank - 1, 0, MPI_COMM_WORLD); 
                if (element > local_arr[0])
                    swap(element, local_arr[0]);
            }
        } else {
            /* Send element */
            if (rank != LAST) {
                element = local_arr[num_per_node-1];
                MPI_Send(&element, 1, MPI_INT, rank + 1, 0, MPI_COMM_WORLD); 
                MPI_Recv(&element, 1, MPI_INT, rank + 1, 0, MPI_COMM_WORLD, &status);  
                if (element < local_arr[num_per_node-1])
                    swap(element, local_arr[num_per_node-1]);
            }
        }
    }
}
/* 
    MPI_Barrier(MPI_COMM_WORLD);
    for (int i = 0; i < num_per_node; ++i)
        printf("[AFTER] [Rank %d] local_arr[%d] = %d\n", rank, i, local_arr[i]); 
    
    printf("rank %d is arrived\n", rank);
*/    
    MPI_Barrier(MPI_COMM_WORLD); // Wait for rank0 to read file 

    int *ans;
    if (rank == ROOT) { 
        ans = new int[max_arr_size+3];
    }

    if (has_remain && rank == rank_num - 1) {
        MPI_Gather(local_arr, num_per_node - diff, MPI_INT, 
            ans, num_per_node - diff, MPI_INT, ROOT, MPI_COMM_WORLD);
        
        MPI_Send(local_arr + num_per_node - diff, diff,
                         MPI_INT, ROOT, 0, MPI_COMM_WORLD); 
    }
    else {
        MPI_Gather(local_arr, num_per_node, MPI_INT, ans, num_per_node, 
                                        MPI_INT, ROOT, MPI_COMM_WORLD);
        if (has_remain && rank == ROOT)
            MPI_Recv(ans + N - diff, diff, MPI_INT, LAST, 
                        MPI_ANY_TAG, MPI_COMM_WORLD, &status);  
    }


    MPI_Barrier(MPI_COMM_WORLD);
    MPI_File_open(MPI_COMM_WORLD, argv[3], 
        MPI_MODE_RDWR | MPI_MODE_CREATE, MPI_INFO_NULL, &fout);
    
    if (rank == ROOT) {
        MPI_File_write(fout, ans, N, MPI_INT, &status);
        for (int i = 0; i < N; ++i) {
         //   printf("[FINAL] [Rank %d] ans[%d] = %d\n", rank, i, ans[i]);
        }
    }
    MPI_File_close(&fout);
    
   // printf("rank %d is arrived\n", rank);
    MPI_Barrier(MPI_COMM_WORLD);
    
    if (rank != 0) {
        delete []  local_arr;
     //   printf("[FREE] [RANK %d] SUCCESS FREE\n", rank);
    } else {
        delete [] ans;
        delete [] root_arr;
        delete [] local_arr;
    }
    MPI_Finalize();
     
    return 0;
}
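ROOT and swap() are defined elsewhere in the original source. Hedged stand-ins that make the snippet compile are sketched below; they are assumptions, not the original definitions.

// Assumed helpers for the odd-even sort example above; not the original code.
#define ROOT 0

static inline void swap(int &a, int &b)
{
    int t = a;
    a = b;
    b = t;
}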
Example #27
File: IO.c  Project: BenByington/Proteus
/*
 * This function is just the inverse of writeSpatial.  See comments for above
 * function.
 */
void readSpatial(field * f, char * name)
{
    int i,j,k,l,m;
    debug("Reading spatial data from file %s\n", name);

    PRECISION * sndbuff = 0;
    PRECISION * rcvbuff = 0;

    if(io_node)
    {
        sndbuff = (PRECISION *)malloc(nx * ny * nz_layers * sizeof(PRECISION));
        rcvbuff = (PRECISION *)malloc(nx * ny * nz_layers * sizeof(PRECISION));
        trace("Total local data will be %d PRECISIONs\n", nx*ny*nz_layers);

        //do a mpi IO operation
        //TODO: revisit MPI_MODE_SEQUENTIAL and MPI_INFO_NULL to make sure these are what we want
        MPI_File fh;
        debug("Reading file\n");
        MPI_File_open(fcomm, name, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);

        int disp = 0;
        for(i = 0; i < my_io_layer; i++)
        {
            for(j = io_layers[i].min; j <= io_layers[i].max; j++)
            {
                disp += all_z[j].width;
            }
        }
        disp *= nx * ny * sizeof(PRECISION);
        trace("Our file view starts at displacement %d\n", disp);
        trace("Setting view\n");
        MPI_File_set_view(fh, disp, MPI_PRECISION, MPI_PRECISION, "native", MPI_INFO_NULL);
        trace("Reading file\n");
        MPI_File_read(fh, sndbuff, nx * ny * nz_layers, MPI_PRECISION, MPI_STATUS_IGNORE );

        MPI_File_close(&fh);
    

        debug("transposing the data for scatter to compute nodes\n");
        //rcvbuff is [l][h][vz][hx][y]
        //sndbuff [l][vz][y][h][hx]
        int indexr = 0;
        int indexs = 0;
        for(i = 0; i < io_layers[my_io_layer].width; i++)
        {
            for(j = 0; j < hdiv; j++)
            {
                int vz = all_z[i + io_layers[my_io_layer].min].width;
                int vzmin = all_z[i + io_layers[my_io_layer].min].min;
                int vzstart = all_z[io_layers[my_io_layer].min].min;
                for(k = 0; k < vz; k++)
                {
                    int hx = all_x[j].width;
                    int hxmin = all_x[j].min;
                    for(l = 0; l < hx; l++)
                    {
                        for(m = 0; m < ny; m++)
                        {
                            indexs = ((k + vzmin - vzstart)*ny + m)*nx + l + hxmin;

                            rcvbuff[indexr] = sndbuff[indexs];
                            indexr++;
                        }
                    }
                }
            }
        }
    }

    int rcvcnt;
    int displs[iosize+1];
    int sndcounts[iosize];
    debug("scattering data to the compute nodes\n");
    if(compute_node)
    {
        rcvcnt = my_x->width * my_z->width * ny;
        trace("I expect to receive %d PRECISIONs\n", rcvcnt);
        MPI_Scatterv(0, 0, 0, MPI_PRECISION, f->spatial, rcvcnt, MPI_PRECISION, 0, iocomm);
    }
    else if(io_node)
    {
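        // The I/O node itself receives nothing from the scatter, so its send count is 0;
        // displs carries one extra leading slot so that each displacement can be computed
        // as the previous displacement plus the previous send count.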
        displs[0] = 0;
        displs[1] = 0;
        sndcounts[0] = 0;

        int * pidspls = displs + 2;
        int * pisndcounts = sndcounts+1;
        for(i = io_layers[my_io_layer].min; i <= io_layers[my_io_layer].max; i++)
        {
            for(j = 0; j < hdiv; j++)
            {
                *pisndcounts = all_x[j].width * all_z[i].width * ny;
                *pidspls = *(pidspls-1) + *pisndcounts;
                trace("Sending %d PRECISIONs from displacement %d to proc %d\n", *pisndcounts, *pidspls, i*j);
                pidspls++;
                pisndcounts++;
            }
        }
        MPI_Scatterv(rcvbuff, sndcounts, displs, MPI_PRECISION, 0, 0, MPI_PRECISION, 0, iocomm);
    }

    if(io_node)
    {
        free(rcvbuff);
        free(sndbuff);
    }
    debug("Reading from file done\n");
}
예제 #28
0
파일: 1.c 프로젝트: jonnyguio/progparela
int main(int argc, char **argv) {

  double *xy;
  double mySUMx, mySUMy, mySUMxy, mySUMxx, SUMx, SUMy, SUMxy,
         SUMxx, SUMres, res, slope, y_intercept, y_estimate,
         begin, end;
  int i, j, n, myid, numprocs, naverage, nremain, mypoints,
    sizeFile, ret;
  /*int new_sleep (int seconds);*/
  MPI_Status istatus;
  MPI_Datatype MPI_POINT;
  MPI_File infile;
  MPI_Offset ishift;

  MPI_Init(&argc, &argv);
  MPI_Comm_rank (MPI_COMM_WORLD, &myid);
  MPI_Comm_size (MPI_COMM_WORLD, &numprocs);

  MPI_Type_contiguous(2, MPI_DOUBLE, &MPI_POINT);
  MPI_Type_commit(&MPI_POINT);
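  /* MPI_POINT packs one (x, y) sample into two contiguous doubles, so the input
     file can be read below in whole point pairs. */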

  ret = MPI_File_open(MPI_COMM_WORLD, "1.in", MPI_MODE_RDONLY, MPI_INFO_NULL, &infile);
  if (ret == MPI_SUCCESS)
    printf("File opened successfully on process %d\n", myid);
  else {
    printf("Error opening file on process %d\n", myid);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }

  /* ----------------------------------------------------------
   * Step 1: every process reads the point count n from the file header
   * ---------------------------------------------------------- */


  MPI_File_seek(infile, 0, MPI_SEEK_SET);
  ret = MPI_File_read(infile, &n, 1, MPI_INT, &istatus);

  if (ret == MPI_SUCCESS)
    printf("File read successfully on process %d\n", myid);
  else {
    printf("Error reading file on process %d\n", myid);
    MPI_Abort(MPI_COMM_WORLD, 1);
  }

  naverage = n / numprocs;
  nremain = n % numprocs;

//  printf("%d - %d - %d - %d - %d\n", myid, n, naverage, nremain, 2 * (naverage + nremain));

  xy = (double *) malloc (2 * (naverage + nremain) * sizeof(double));

  /*if (myid == 0) {
    printf ("Number of processes used: %d\n", numprocs);
    printf ("-------------------------------------\n");
    printf ("The x coordinates on worker processes:\n");

    fscanf (infile, "%d", &n);
    x = (double *) malloc (n*sizeof(double));
    y = (double *) malloc (n*sizeof(double));
    for (i=0; i<n; i++)
      fscanf (infile, "%lf %lf", &x[i], &y[i]);
    for (i=1; i<numprocs; i++)
      MPI_Send (&n, 1, MPI_INT, i, 10, MPI_COMM_WORLD);
  }
  else {
    MPI_Recv (&n, 1, MPI_INT, 0, 10, MPI_COMM_WORLD, &istatus);
    x = (double *) malloc (n*sizeof(double));
    y = (double *) malloc (n*sizeof(double));
  }*/
  /* ---------------------------------------------------------- */

  /* ----------------------------------------------------------
   * Step 2: each process reads its own subset of (x, y) pairs via MPI-IO
   * ---------------------------------------------------------- */

  if (myid == 0)
    GET_TIME(begin);
  ishift = myid * naverage;
  mypoints = (myid < numprocs - 1) ? naverage : naverage + nremain;

  //MPI_File_set_view(infile, ishift, MPI_POINT, MPI_DOUBLE, "native", MPI_INFO_NULL);
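  /* Every rank has already consumed the leading int n, so a relative seek of
     ishift point pairs positions each rank's individual file pointer at the start
     of its own block of (x, y) samples. */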
  MPI_File_seek(infile, ishift * sizeof(double) * 2, MPI_SEEK_CUR);
  MPI_File_read(infile, &xy[0], mypoints, MPI_POINT, &istatus);

  /*for (i = 0; i < mypoints * 2; i += 2) {
      printf("(%d) %d: ", myid, i);
      printf("%.0lf ", xy[i]);
      printf("%.0lf\n", xy[i + 1]);
  }
  printf("\n");*/

  /*if (myid == 0) {
    for (i=1; i<numprocs; i++) {
      ishift = i * naverage;
      mypoints = (i < numprocs - 1) ? naverage : naverage + nremain;
      MPI_Send (&ishift, 1, MPI_INT, i, 1, MPI_COMM_WORLD);
      MPI_Send (&mypoints, 1, MPI_INT, i, 2, MPI_COMM_WORLD);
      MPI_Send (&x[ishift], mypoints, MPI_DOUBLE, i, 3, MPI_COMM_WORLD);
      MPI_Send (&y[ishift], mypoints, MPI_DOUBLE, i, 4, MPI_COMM_WORLD);
    }
  }
  else {
    MPI_Recv (&ishift, 1, MPI_INT, 0, 1, MPI_COMM_WORLD, &istatus);
    MPI_Recv (&mypoints, 1, MPI_INT, 0, 2, MPI_COMM_WORLD, &istatus);
    MPI_Recv (&x[ishift], mypoints, MPI_DOUBLE, 0, 3, MPI_COMM_WORLD,
	      &istatus);
    MPI_Recv (&y[ishift], mypoints, MPI_DOUBLE, 0, 4, MPI_COMM_WORLD,
	      &istatus);
    printf ("id %d: ", myid);
    for (i=0; i<n; i++) printf("%4.2lf ", x[i]);
    printf ("\n");
  }*/

  /* ----------------------------------------------------------
   * Step 3: Each process calculates its partial sum
   * ---------------------------------------------------------- */
  mySUMx = 0; mySUMy = 0; mySUMxy = 0; mySUMxx = 0;
  if (myid == 0) {
    ishift = 0;
    mypoints = naverage;
  }
  for (j = 0; j < mypoints * 2; j += 2) {
    mySUMx += xy[j];
    mySUMy += xy[j + 1];
    mySUMxy += xy[j] * xy[j + 1];
    mySUMxx += xy[j] * xy[j];
  }

//  printf("%d:\t%lf - %lf - %lf - %lf\n", myid, mySUMx, mySUMy, mySUMxy, mySUMxx);

  /* ----------------------------------------------------------
   * Step 4: the partial sums are combined on process 0 with MPI_Reduce
   * ---------------------------------------------------------- */

   MPI_Reduce(&mySUMx, &SUMx, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&mySUMy, &SUMy, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&mySUMxy, &SUMxy, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);
   MPI_Reduce(&mySUMxx, &SUMxx, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

   if (myid == 0)
     GET_TIME(end);

  /*if (myid != 0) {
    MPI_Send (&mySUMx, 1, MPI_DOUBLE, 0, 5, MPI_COMM_WORLD);
    MPI_Send (&mySUMy, 1, MPI_DOUBLE, 0, 6, MPI_COMM_WORLD);
    MPI_Send (&mySUMxy, 1, MPI_DOUBLE, 0, 7, MPI_COMM_WORLD);
    MPI_Send (&mySUMxx, 1, MPI_DOUBLE, 0, 8, MPI_COMM_WORLD);
	    }
  else {
    SUMx = mySUMx; SUMy = mySUMy;
    SUMxy = mySUMxy; SUMxx = mySUMxx;
    for (i=1; i<numprocs; i++) {
      MPI_Recv (&mySUMx, 1, MPI_DOUBLE, i, 5, MPI_COMM_WORLD, &istatus);
      MPI_Recv (&mySUMy, 1, MPI_DOUBLE, i, 6, MPI_COMM_WORLD, &istatus);
      MPI_Recv (&mySUMxy, 1, MPI_DOUBLE, i, 7, MPI_COMM_WORLD, &istatus);
      MPI_Recv (&mySUMxx, 1, MPI_DOUBLE, i, 8, MPI_COMM_WORLD, &istatus);
      SUMx = SUMx + mySUMx;
      SUMy = SUMy + mySUMy;
      SUMxy = SUMxy + mySUMxy;
      SUMxx = SUMxx + mySUMxx;
    }
  }*/

  /* ----------------------------------------------------------
   * Step 5: process 0 computes the fit; all processes print their residuals
   * ---------------------------------------------------------- */
  if (myid == 0) {
    slope = (SUMx * SUMy - n * SUMxy ) / (SUMx * SUMx - n * SUMxx);
    y_intercept = (SUMy - slope * SUMx) / n;

    printf ("\n");
    printf ("The linear equation that best fits the given data:\n");
    printf ("       y = %6.2lfx + %6.2lf\n", slope, y_intercept);
    printf ("--------------------------------------------------\n");
    printf ("   Original (x, y)     Estimated y     Residual\n");
    printf ("--------------------------------------------------\n");
    SUMres = 0;
  }
  MPI_Bcast(&slope, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
  MPI_Bcast(&y_intercept, 1, MPI_DOUBLE, 0, MPI_COMM_WORLD);
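  /* Every rank now knows the fitted line, so each one evaluates and prints the
     residuals of its own points; the barrier loop below serializes the output in
     rank order. */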
  for (j = 0; j < numprocs; j++) {
    MPI_Barrier(MPI_COMM_WORLD);
    if (j == myid) {
      SUMres = 0;
      for (i = 0; i < mypoints * 2; i += 2) {
        y_estimate = slope * xy[i] + y_intercept;
        res = xy[i + 1] - y_estimate;
        SUMres = SUMres + res * res;
        printf("   (%6.2lf %6.2lf)      %6.2lf       %6.2lf\n", xy[i], xy[i + 1], y_estimate, res);
      }
    }
  }
  /* Combine the per-process residual sums so that process 0 reports the global value
     rather than only its own share. */
  res = SUMres;
  MPI_Reduce(&res, &SUMres, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD);

  MPI_Barrier(MPI_COMM_WORLD);
  if (myid == 0) {
    printf("--------------------------------------------------\n");
    printf("Residual sum = %6.2lf\n", SUMres);
    printf("Time: %lf\n", end - begin);
  }

  MPI_File_close(&infile);

  MPI_Finalize();
}
예제 #29
0
int main (int argc, char *argv[])
{
    
    int proc_num, my_rank, len;
    int i, j, k;
    int req_size, repeat_time, stride, dbg_print;
    double start_time, elapsed_time;
    double all_time_max, all_time_avg, all_time_min;
    char* read_data;
    char filename[256];

    double timeStat[kOST][10];
    double globalTimeStat[kOST][10];
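    // timeStat/globalTimeStat hold one timing per OST and per repetition; kOST is
    // assumed to be a compile-time constant defined elsewhere in this file, and the
    // second dimension accommodates at most 10 repetitions.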

    MPI_Status status;
    MPI_File fh;
    MPI_Datatype contig_type;
    MPI_Datatype stride_type;
    MPI_Offset OST_proc, start_pos, stripe_size, total_size_proc;

    MPI_Init(&argc, &argv);
    
    // get the number of procs and rank in the comm
    MPI_Comm_size(MPI_COMM_WORLD, &proc_num);
    MPI_Comm_rank(MPI_COMM_WORLD, &my_rank);
    
    if(argc < 5) {
        if (my_rank == 0) {
            printf("Wrong argument number!\n");
            printf("Use %s filepath single_read_size(MB) repeat_time\n", argv[0]);
        }
        MPI_Finalize();
        exit(-1);
    }

    req_size    = atoi(argv[2]);
    repeat_time = atoi(argv[3]);
    OST_proc    = atoi(argv[4]);

    req_size   *= (1048576/OST_proc);    // convert MB to bytes, divided by OST_proc (processes sharing an OST)

    /* total_size_proc = atoi(argv[3]); */
    /* stripe_size     = atoi(argv[4]); */

    /* if (argc == 7) { */
    /*     dbg_print = atoi(argv[6]); */
    /* } */

    /* repeat_time     = (int)(total_size_proc / req_size); */
    /* start_pos       = (my_rank/OST_proc) * stripe_size; */
    /* stride          = (int)(kOST * stripe_size / req_size); */

    read_data       = (char*)malloc(req_size*sizeof(char));
     
    MPI_Type_contiguous( req_size, MPI_CHAR, &contig_type);
    MPI_Type_commit(&contig_type);

    /* MPI_Type_vector(repeat_time, 1, stride, contig_type, &stride_type); */
    /* MPI_Type_commit(&stride_type); */

    MPI_Offset disp;

    for (j = 0; j < repeat_time; j++) {
        disp = j * req_size * (my_rank+1);
        
        /* for (i = 0; i < kOST; i++) { */
        for (i = 0; i < 1; i++) {
        
            sprintf(filename, "%s/%d/temp1.bin", argv[1], my_rank);
            /* sprintf(filename, "%s/%d/temp.bin", argv[1], i); */
            /* printf("%s\n", filename); */

            int rc;
            if (OST_proc == 1) {
                rc = MPI_File_open(MPI_COMM_SELF, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
            }
            else {
                rc = MPI_File_open(MPI_COMM_WORLD, filename, MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
            }
            if (rc != MPI_SUCCESS) {
                if (my_rank == 0) { printf("File does not exist\n"); }
                MPI_Abort(MPI_COMM_WORLD, -1);
            }
            
            /* MPI_File_set_view( fh, disp, MPI_CHAR, contig_type, "native", MPI_INFO_NULL); */
            MPI_Barrier(MPI_COMM_WORLD);
            start_time = MPI_Wtime();

            /* printf("disp:%lld, size:%d\n", disp, req_size); */
            MPI_File_read( fh, read_data, req_size, MPI_CHAR, &status );
            /* MPI_File_read_all( fh, read_data, req_size, MPI_CHAR, &status ); */
         
            elapsed_time = MPI_Wtime() - start_time;
            timeStat[i][j] = elapsed_time;
            printf("Rank %d, reading from %s, start %d, time %.6f\n", my_rank, filename, disp, elapsed_time);
            
            MPI_File_close(&fh);
        }
    }

    /* MPI_Reduce(&elapsed_time, &all_time_max, 1, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD); */
    /* MPI_Reduce(&elapsed_time, &all_time_min, 1, MPI_DOUBLE, MPI_MIN, 0, MPI_COMM_WORLD); */
    /* MPI_Reduce(&elapsed_time, &all_time_avg, 1, MPI_DOUBLE, MPI_SUM, 0, MPI_COMM_WORLD); */
    
    int err; 
    err = MPI_Reduce(timeStat, globalTimeStat, kOST*10, MPI_DOUBLE, MPI_MAX, 0, MPI_COMM_WORLD);
    if (err != MPI_SUCCESS) {
        printf("Error Reducing, exiting...\n");
        exit(-1);
    }

/*     if (my_rank == 0) { */
/*         for (j = 0; j < kOST; j++) { */
                
/*             printf("%d", j); */
/*             for (i = 0; i < repeat_time; i++) { */
/*                  printf(", %.6f", globalTimeStat[j][i]); */
/*             } */
/*             printf("\n"); */
/*         } */
/*     } */
   
   
    free(read_data);
    MPI_Type_free(&contig_type);
    /* MPI_Type_free(&stride_type); */
    
    MPI_Finalize();

    return 0;
}
예제 #30
0
int main (int argc, char *argv[])
{
    int procNum, worldRank, len;
    int ioProc, ioRank, scatterProc, scatterRank;
    int i, j, k;
    int ioProcPerNode, dbg_print;
    int colorIO, colorScatter, scatterGroupSize;

    double *ioData = NULL, *myData = NULL;   // ioData is only allocated on the I/O ranks
    double ioStartTime, elapsedTime, scatterStartTime, scatterTime, totalTime;
    double allTimeMax, allTimeAvg, allTimeMin;
    double timeStat[kOST], globalTimeStat[kOST];
    double mySum;
    

    MPI_Offset   disp, myReqSize, stripeSize, ioReadSize;
    MPI_Datatype strided;
    MPI_Status   status;
    MPI_File     fh;
    MPI_Comm     MY_COMM_IO, MY_COMM_SCATTER;

    // Init
    MPI_Init(&argc, &argv);
       
    if(argc < 3) {
        if (worldRank == 0) {
            fprintf(stderr, "Wrong argument number!\n");
            fprintf(stderr, "Usage:\n%s filepath single_read_size(MB) I/O_proc_per_node\n", argv[0]);
        }
        MPI_Finalize();
        exit(-1);
    }
 
    sprintf(filename, "%s", argv[1]);
    myReqSize     = atoi(argv[2]);
    myReqSize    *= 1048576;
    ioProcPerNode = atoi(argv[3]);
    stripeSize    = 2147483648;     //2G

    // adjust to double from char
    myReqSize    /= sizeof(double);
    stripeSize   /= sizeof(double);
    // It turns out MPI seems to have a problem when the stride size is greater than 2147483647 with a vector
    // datatype, which is then used in MPI_File_set_view: the first process reads less data than expected.
    // The current solution is to use the basic type MPI_DOUBLE instead of MPI_BYTE, so the stride is 1/8 of the original.

    scatterGroupSize = kCorePerNode / ioProcPerNode;
    ioReadSize       = myReqSize * scatterGroupSize;

    MPI_Type_vector(scatterGroupSize, myReqSize, stripeSize, MPI_DOUBLE, &strided);
    MPI_Type_commit(&strided);
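    /* Sanity check (added sketch, not part of the original benchmark): the stride
       argument of MPI_Type_vector is a plain int, so the stride expressed in
       elements must not exceed 2147483647.  With MPI_BYTE elements the stride
       would be stripeSize * sizeof(double) = 2^31, which is exactly the overflow
       described above and the reason the vector type is built on MPI_DOUBLE. */
    if (stripeSize > 2147483647) {
        fprintf(stderr, "stride does not fit in the int argument of MPI_Type_vector\n");
        MPI_Abort(MPI_COMM_WORLD, -1);
    }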
   
    /* printf("stripeSize: %lld\n", stripeSize); */

    // get the number of procs and rank in the comm
    MPI_Comm_size(MPI_COMM_WORLD, &procNum);
    MPI_Comm_rank(MPI_COMM_WORLD, &worldRank);

    // color for I/O ranks and scatter ranks
    colorIO      = worldRank % scatterGroupSize == 0 ? 0 : 1;
    colorScatter = (int)(worldRank / scatterGroupSize);
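    // Every scatterGroupSize-th world rank becomes an I/O rank (colorIO == 0); the
    // colorScatter split then groups each I/O rank with the compute ranks it will
    // later feed through MPI_Scatter.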

    // debug print
    /* MPI_Barrier(MPI_COMM_WORLD); */
    /* printf("World rank: %d, colorIO: %d, colorScatter: %d\n", worldRank, colorIO, colorScatter); */

    // split to I/O ranks
    MPI_Comm_split(MPI_COMM_WORLD, colorIO, worldRank, &MY_COMM_IO);
    if (colorIO == 0) {
        MPI_Comm_size(MY_COMM_IO, &ioProc);
        MPI_Comm_rank(MY_COMM_IO, &ioRank);
    }

    // split to scatter groups
    MPI_Comm_split(MPI_COMM_WORLD, colorScatter, worldRank, &MY_COMM_SCATTER);
    MPI_Comm_size(MY_COMM_SCATTER, &scatterProc);
    MPI_Comm_rank(MY_COMM_SCATTER, &scatterRank);
    
    // debug print
    /* printf("World Rank: %d, IO Rank: %d, Scatter Rank:%d\n", worldRank, ioRank, scatterRank); */

    // local for all proc
    myData  = (double*)malloc(myReqSize * sizeof(double));
    if (myData == NULL) {
        fprintf(stderr, "Error allocating myData\n");
        MPI_Abort(MPI_COMM_WORLD, -1);
    }

    // actual I/O workers
    if (colorIO == 0) {
        //debug print
        /* printf("[%d] %dMB\n", worldRank, (int)(ioReadSize / 1048576)); */

        // I/O process needs more space
        ioData  = (double*)malloc(ioReadSize * sizeof(double));
        if (ioData == NULL) {
            fprintf(stderr, "Error allocating ioData\n");
            MPI_Abort(MPI_COMM_WORLD, -1);
        }

        // distribute the data read among I/O ranks
        disp = stripeSize * worldRank;

        int rc = MPI_File_open(MY_COMM_IO, argv[1], MPI_MODE_RDONLY, MPI_INFO_NULL, &fh);
        if (rc != MPI_SUCCESS) {
            if (worldRank == 0) { printf("File [%s] does not exist\n", argv[1]); }
            MPI_Abort(MPI_COMM_WORLD, -1);
        }
        
    
        // set file view
        disp = stripeSize * worldRank * sizeof(double);
        MPI_File_set_view( fh, disp, MPI_DOUBLE, strided, "native", MPI_INFO_NULL);
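        // With the strided view in place, one contiguous read of ioReadSize doubles
        // collects scatterGroupSize blocks of myReqSize doubles, one per stripe,
        // packed back-to-back in ioData and ready to be scattered.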


        MPI_Barrier(MY_COMM_IO);
        ioStartTime = MPI_Wtime();

        MPI_File_read( fh, ioData, ioReadSize, MPI_DOUBLE, &status );
 
        elapsedTime = MPI_Wtime() - ioStartTime;
        
        MPI_File_close(&fh);

        // debug print
        /* PrintData (ioData, worldRank, disp, ioReadSize); */
    }

    /* fprintf(stderr, "[%d] Finished data read\n[myReqSize] %lld\n", worldRank, myReqSize); */

    MPI_Barrier(MPI_COMM_WORLD);

    scatterStartTime = MPI_Wtime();

    // Scatter data from I/O ranks to COMM_SCATTER
    MPI_Scatter(ioData, myReqSize, MPI_DOUBLE, myData, myReqSize, MPI_DOUBLE, 0, MY_COMM_SCATTER);


    scatterTime = MPI_Wtime() - scatterStartTime;

    totalTime   = MPI_Wtime() - ioStartTime;
    // data correctness verification
    /* DataSum(myData, worldRank, myReqSize); */

    // stat for read time among I/O ranks
    if (colorIO == 0) {
        printf("%3d, %.6f\n", worldRank, elapsedTime);
        //printf("[%d] time %.6f, start offset: %lld, size: %llu\n", worldRank, elapsedTime, disp, ioReadSize);

        MPI_Reduce(&elapsedTime, &allTimeMax, 1, MPI_DOUBLE, MPI_MAX, 0, MY_COMM_IO);
        MPI_Reduce(&elapsedTime, &allTimeMin, 1, MPI_DOUBLE, MPI_MIN, 0, MY_COMM_IO);
        MPI_Reduce(&elapsedTime, &allTimeAvg, 1, MPI_DOUBLE, MPI_SUM, 0, MY_COMM_IO);
        allTimeAvg /= ioProcPerNode * procNum / kCorePerNode;
    }
   
    MPI_Barrier(MPI_COMM_WORLD);
 
    if (worldRank == 0) {
        double totalSizeMB = myReqSize * sizeof(double)/ (1024.0*1024.0) * procNum ;
        printf("[Read time] %f, %f, %f [agg rate]%.2fM/s\n", allTimeMin, allTimeAvg, allTimeMax, totalSizeMB / allTimeMax);
        printf("[Scatter time] %f, [agg rate] %.2fM/s\n", scatterTime, totalSizeMB / scatterTime);
        printf("[Total time] %f\n", totalTime);
    }
  
    if (colorIO == 0) {
        free(ioData);
        MPI_Comm_free(&MY_COMM_IO);
    }

    free(myData);
    MPI_Comm_free(&MY_COMM_SCATTER);

    MPI_Type_free(&strided);
    MPI_Finalize();

    return 0;
}