Example #1
0
QString cs8Variable::definition()
{

    // Build "<type> <name>" and append "[<sizes>]" only when the variable is an array.
    QString def = QString( "%1 %2" ).arg( type() ).arg( name() );
    if ( allSizes() != QString() )
        def += QString( "[%1]" ).arg( allSizes() );
    return def;
}
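A tiny standalone sketch of the same formatting, not part of the cs8Variable class: the literal values below stand in for whatever type(), name(), and allSizes() return and are made up for illustration.

#include <QDebug>
#include <QString>

int main()
{
    // Stand-ins for type(), name(), and allSizes(); an empty sizes string means "not an array".
    QString type = "num", name = "counts", sizes = "10";

    QString def = QString( "%1 %2" ).arg( type ).arg( name )
                + ( sizes != QString() ? QString( "[%1]" ).arg( sizes ) : QString() );

    qDebug() << def; // prints "num counts[10]" (qDebug adds the quotes)
    return 0;
}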
Example #2
0
int BlockMapToHandle(FILE * handle, const Epetra_BlockMap & map) {

  const Epetra_Comm & comm = map.Comm();
  int numProc = comm.NumProc();
  bool doSizes = !map.ConstantElementSize();

  if (numProc==1) { // Serial case: the whole map lives on this process, so write it directly
    int * myElements = map.MyGlobalElements();
    int * elementSizeList = 0;
    if (doSizes) elementSizeList = map.ElementSizeList();
    return(writeBlockMap(handle, map.NumGlobalElements(), myElements, elementSizeList, doSizes));
  }

  int numRows = map.NumMyElements();

  // Gather this process's global IDs and element sizes into distributed vectors
  // that share the input map's row distribution.
  Epetra_Map allGidsMap(-1, numRows, 0, comm);

  Epetra_IntVector allGids(allGidsMap);
  for (int i=0; i<numRows; i++) allGids[i] = map.GID(i);

  Epetra_IntVector allSizes(allGidsMap);
  for (int i=0; i<numRows; i++) allSizes[i] = map.ElementSize(i);
  
  // Now strip-mine the rows of the input map: each of the numProc passes below imports
  // one strip of GIDs (and sizes) onto PE 0 and writes it to the file.
  int numChunks = numProc;
  int stripSize = allGids.GlobalLength()/numChunks;
  int remainder = allGids.GlobalLength()%numChunks;
  int curStart = 0;
  int curStripSize = 0;
  Epetra_IntSerialDenseVector importGidList;
  Epetra_IntSerialDenseVector importSizeList;
  if (comm.MyPID()==0) {
    importGidList.Size(stripSize+1); // Set size of vector to max needed
    if (doSizes) importSizeList.Size(stripSize+1); // Set size of vector to max needed
  }
  for (int i=0; i<numChunks; i++) {
    if (comm.MyPID()==0) { // Only PE 0 does this part
      curStripSize = stripSize;
      if (i<remainder) curStripSize++; // handle leftovers
      for (int j=0; j<curStripSize; j++) importGidList[j] = j + curStart;
      curStart += curStripSize;
    }
    // The following import map will be non-trivial only on PE 0.
    Epetra_Map importGidMap(-1, curStripSize, importGidList.Values(), 0, comm);
    Epetra_Import gidImporter(importGidMap, allGidsMap);
    
    Epetra_IntVector importGids(importGidMap);
    if (importGids.Import(allGids, gidImporter, Insert)) return(-1);
    Epetra_IntVector importSizes(importGidMap);
    if (doSizes && importSizes.Import(allSizes, gidImporter, Insert)) return(-1);
    
    // importGids (and importSizes, if non-trivial block map)
    // now have a list of GIDs (and sizes, respectively) for the current strip of map.
    
    int * myElements = importGids.Values();
    int * elementSizeList = 0;
    if (doSizes) elementSizeList = importSizes.Values();
    // Finally we are ready to write this strip of the map to file
    writeBlockMap(handle, importGids.MyLength(), myElements, elementSizeList, doSizes);
  }
  return(0);
}
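A minimal driver sketch for BlockMapToHandle, not taken from the source: the map parameters and file name are made up, it assumes an MPI build of Epetra, and it opens the file only on process 0 on the assumption that writeBlockMap (not shown here) writes nothing for the zero-length strips the other ranks receive.

#include <cstdio>
#include <mpi.h>
#include "Epetra_MpiComm.h"
#include "Epetra_BlockMap.h"

int main(int argc, char** argv) {
  MPI_Init(&argc, &argv);
  Epetra_MpiComm comm(MPI_COMM_WORLD);

  // Hypothetical map: 100 global elements of constant size 3, index base 0.
  Epetra_BlockMap map(100, 3, 0, comm);

  // Only process 0 holds a real file handle; the other ranks still take part in the
  // collective Import calls inside BlockMapToHandle.
  FILE * handle = 0;
  if (comm.MyPID() == 0) handle = fopen("blockmap.txt", "w"); // hypothetical file name

  int ierr = BlockMapToHandle(handle, map); // collective: every rank must call it

  if (handle) fclose(handle);
  MPI_Finalize();
  return ierr;
}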
Example #3
0
// NOTE: An earlier version wrote the data in chunks (one write per data[i]) with
// collective calls in between, which was slower than necessary. The current version
// assumes the same data layout on every process, gathers the per-rank sizes once, and
// issues a single MPI_File_write_at per rank. Handling arbitrary layouts would require
// recording the sizes so the read-in side can reconstruct the matrix.
void Storage::SaveDataFloatMPIBin(char* filename, vector<vector<float> > data, int mpiRank, int mpiSize, MPI_Comm comm)
{
	MPI_File fh;
	// assumes specific data distribution for processes

	// Remove any existing file, then create it for writing (return codes are not checked).
	MPI_File_delete(filename, MPI_INFO_NULL);
	MPI_File_open(comm, filename, MPI_MODE_RDWR|MPI_MODE_CREATE, MPI_INFO_NULL, &fh);

	// (!) Currently assumes every row of data has the same size and that every
	// process holds the same number of rows.
	int size = 0;
	for(int i=0;i<(int)data.size();i++)
	{
		size += data[i].size(); // total number of floats this rank will write
	}

	// Flatten this rank's rows into one contiguous buffer so a single write suffices.
	vector<float> tempData(size);
	int index = 0;
	for(int i=0;i<(int)data.size();i++)
	{
		for(int j=0;j<(int)data[i].size();j++)
		{
			tempData[index] = data[i][j];
			index++;
		}
	}

	// Gather every rank's float count and compute this rank's starting offset as the
	// sum of the counts of all lower-ranked processes.
	vector<int> allSizes(mpiSize);
	MPI_Allgather(&size,1,MPI_INT,&allSizes[0],1,MPI_INT,comm);
	int startPos = 0;
	for(int j=0;j<mpiRank;j++)
		startPos += allSizes[j];

	// The earlier chunked variant (an MPI_Allgather of the chunk size followed by one
	// MPI_File_write_at per data[i]) has been removed in favour of the single buffered
	// write below.

	MPI_Status status;
	if(size>0)
		// Offset is in bytes; note sizeof(float), not sizeof(MPI_FLOAT): MPI_FLOAT is a
		// datatype handle whose size is unrelated to the data being written.
		MPI_File_write_at(fh, startPos*sizeof(float), &tempData[0], size, MPI_FLOAT, &status);
	
	MPI_File_close(&fh);
}
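A hypothetical call site for SaveDataFloatMPIBin, not taken from the source: it assumes Storage is default-constructible and declared in a header, gives every rank the same number of equally sized rows (as the routine expects), and uses a made-up output file name.

#include <mpi.h>
#include <vector>
#include "Storage.h" // hypothetical header declaring the Storage class
using std::vector;

int main(int argc, char** argv)
{
	MPI_Init(&argc, &argv);
	int rank, size;
	MPI_Comm_rank(MPI_COMM_WORLD, &rank);
	MPI_Comm_size(MPI_COMM_WORLD, &size);

	// Two rows of three floats per rank; the values encode the owning rank.
	vector<vector<float> > data(2, vector<float>(3, (float)rank));

	Storage storage;                  // assumed default-constructible
	char filename[] = "activity.bin"; // hypothetical output file name
	storage.SaveDataFloatMPIBin(filename, data, rank, size, MPI_COMM_WORLD);

	MPI_Finalize();
	return 0;
}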