int writeRowMatrix(FILE * handle, const Epetra_RowMatrix & A) {

  long long numRows_LL = A.NumGlobalRows64();
  if(numRows_LL > std::numeric_limits<int>::max())
    throw "EpetraExt::writeRowMatrix: numRows_LL > std::numeric_limits<int>::max()";
  int numRows = static_cast<int>(numRows_LL);

  Epetra_Map rowMap = A.RowMatrixRowMap();
  Epetra_Map colMap = A.RowMatrixColMap();
  const Epetra_Comm & comm = rowMap.Comm();
  long long ioffset = 1 - rowMap.IndexBase64(); // Matlab indices start at 1
  long long joffset = 1 - colMap.IndexBase64(); // Matlab indices start at 1

  if (comm.MyPID()!=0) {
    if (A.NumMyRows()!=0) {EPETRA_CHK_ERR(-1);}
    if (A.NumMyCols()!=0) {EPETRA_CHK_ERR(-1);}
  }
  else {
    if (numRows!=A.NumMyRows()) {EPETRA_CHK_ERR(-1);}
    Epetra_SerialDenseVector values(A.MaxNumEntries());
    Epetra_IntSerialDenseVector indices(A.MaxNumEntries());
    for (int i=0; i<numRows; i++) {
      long long I = rowMap.GID64(i) + ioffset;
      int numEntries;
      if (A.ExtractMyRowCopy(i, values.Length(), numEntries,
                             values.Values(), indices.Values())!=0) {EPETRA_CHK_ERR(-1);}
      for (int j=0; j<numEntries; j++) {
        long long J = colMap.GID64(indices[j]) + joffset;
        double val = values[j];
        fprintf(handle, "%lld %lld %22.16e\n", I, J, val);
      }
    }
  }
  return(0);
}
template<typename int_type>
int RowMatrixToHandle(FILE * handle, const Epetra_RowMatrix & A) {

  Epetra_Map map = A.RowMatrixRowMap();
  const Epetra_Comm & comm = map.Comm();
  int numProc = comm.NumProc();

  if (numProc==1 || !A.Map().DistributedGlobal())
    writeRowMatrix(handle, A);
  else {
    int numRows = map.NumMyElements();

    Epetra_Map allGidsMap((int_type) -1, numRows, (int_type) 0, comm);

    typename Epetra_GIDTypeVector<int_type>::impl allGids(allGidsMap);
    for (int i=0; i<numRows; i++) allGids[i] = (int_type) map.GID64(i);

    // Now construct a RowMatrix on PE 0 by strip-mining the rows of the input matrix A.
    int numChunks = numProc;
    int stripSize = allGids.GlobalLength64()/numChunks;
    int remainder = allGids.GlobalLength64()%numChunks;
    int curStart = 0;
    int curStripSize = 0;
    typename Epetra_GIDTypeSerialDenseVector<int_type>::impl importGidList;
    if (comm.MyPID()==0)
      importGidList.Size(stripSize+1); // Set size of vector to max needed
    for (int i=0; i<numChunks; i++) {
      if (comm.MyPID()==0) { // Only PE 0 does this part
        curStripSize = stripSize;
        if (i<remainder) curStripSize++; // handle leftovers
        for (int j=0; j<curStripSize; j++) importGidList[j] = j + curStart;
        curStart += curStripSize;
      }
      // The following import map will be non-trivial only on PE 0.
      if (comm.MyPID()>0) assert(curStripSize==0);
      Epetra_Map importGidMap(-1, curStripSize, importGidList.Values(), 0, comm);
      Epetra_Import gidImporter(importGidMap, allGidsMap);
      typename Epetra_GIDTypeVector<int_type>::impl importGids(importGidMap);
      if (importGids.Import(allGids, gidImporter, Insert)!=0) {EPETRA_CHK_ERR(-1);}

      // importGids now has a list of GIDs for the current strip of matrix rows.
      // Use these values to build another importer that will get rows of the matrix.

      // The following import map will be non-trivial only on PE 0.
      Epetra_Map importMap(-1, importGids.MyLength(), importGids.Values(), map.IndexBase64(), comm);
      Epetra_Import importer(importMap, map);
      Epetra_CrsMatrix importA(Copy, importMap, 0);
      if (importA.Import(A, importer, Insert)!=0) {EPETRA_CHK_ERR(-1);}
      if (importA.FillComplete(A.OperatorDomainMap(), importMap)!=0) {EPETRA_CHK_ERR(-1);}

      // Finally we are ready to write this strip of the matrix to ostream
      if (writeRowMatrix(handle, importA)!=0) {EPETRA_CHK_ERR(-1);}
    }
  }
  return(0);
}
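// Illustrative sketch (not part of the original source): driving the strip-mined
// writer above from application code.  Assumes a filled Epetra_RowMatrix; only
// PE 0 needs a valid FILE*, since writeRowMatrix prints nothing on other ranks.
// The helper name and the explicit <long long> instantiation are assumptions
// matching the 64-bit GID path used in this file.
static int writeMatrixToMatlabFile(const char * filename, const Epetra_RowMatrix & A) {
  const Epetra_Comm & comm = A.RowMatrixRowMap().Comm();
  FILE * handle = 0;
  if (comm.MyPID()==0) {
    handle = fopen(filename, "w");                 // overwrite any existing file
    if (handle==0) {EPETRA_CHK_ERR(-1);}
  }
  // Dump A as "i j value" triplets with Matlab-style 1-based indices.
  if (RowMatrixToHandle<long long>(handle, A)!=0) {EPETRA_CHK_ERR(-1);}
  if (comm.MyPID()==0) fclose(handle);
  return(0);
}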
int main(int argc, char *argv[]) {

  int ierr=0, returnierr=0;

#ifdef EPETRA_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  if (!verbose) {
    Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
  }
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  if (verbose && MyPID==0)
    cout << Epetra_Version() << endl << endl;

  if (verbose) cout << Comm << endl;

  bool verbose1 = verbose;
  if (verbose) verbose = (MyPID==0);

  int NumMyElements = 10000;
  int NumMyElements1 = NumMyElements; // Used for local map
  long long NumGlobalElements = ((long long)NumMyElements)*NumProc+EPETRA_MIN(NumProc,3);
  if (MyPID < 3) NumMyElements++;
  long long IndexBase = 0;
  bool DistributedGlobal = (NumGlobalElements>NumMyElements);

  Epetra_Map* Map;

  // Test exceptions
  if (verbose)
    cout << "*******************************************************************************************" << endl
         << " Testing Exceptions (Expect error messages if EPETRA_NO_ERROR_REPORTS is not defined" << endl
         << "*******************************************************************************************" << endl
         << endl << endl;

  try {
    if (verbose) cout << "Checking Epetra_Map(-2, IndexBase, Comm)" << endl;
    Map = new Epetra_Map((long long)-2, IndexBase, Comm);
  }
  catch (int Error) {
    if (Error!=-1) {
      if (Error!=0) {
        EPETRA_TEST_ERR(Error,returnierr);
        if (verbose) cout << "Error code should be -1" << endl;
      }
      else {
        cout << "Error code = " << Error << "Should be -1" << endl;
        returnierr+=1;
      }
    }
    else if (verbose) cout << "Checked OK\n\n" << endl;
  }
  try {
    if (verbose) cout << "Checking Epetra_Map(2, 3, IndexBase, Comm)" << endl;
    Map = new Epetra_Map((long long)2, 3, IndexBase, Comm);
  }
  catch (int Error) {
    if (Error!=-4) {
      if (Error!=0) {
        EPETRA_TEST_ERR(Error,returnierr);
        if (verbose) cout << "Error code should be -4" << endl;
      }
      else {
        cout << "Error code = " << Error << "Should be -4" << endl;
        returnierr+=1;
      }
    }
    else if (verbose) cout << "Checked OK\n\n" << endl;
  }

  if (verbose) cerr << flush;
  if (verbose) cout << flush;
  Comm.Barrier();
  if (verbose)
    cout << endl << endl
         << "*******************************************************************************************" << endl
         << " Testing valid constructor now......................................................" << endl
         << "*******************************************************************************************" << endl
         << endl << endl;

  // Test Epetra-defined uniform linear distribution constructor
  Map = new Epetra_Map(NumGlobalElements, IndexBase, Comm);
  if (verbose) cout << "Checking Epetra_Map(NumGlobalElements, IndexBase, Comm)" << endl;
  ierr = checkmap(*Map, NumGlobalElements, NumMyElements, 0,
                  IndexBase, Comm, DistributedGlobal);
  EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;
  delete Map;

  // Test User-defined linear distribution constructor
  Map = new Epetra_Map(NumGlobalElements, NumMyElements, IndexBase, Comm);
  if (verbose) cout << "Checking Epetra_Map(NumGlobalElements, NumMyElements, IndexBase, Comm)" << endl;
  ierr = checkmap(*Map, NumGlobalElements, NumMyElements, 0,
                  IndexBase, Comm, DistributedGlobal);
  EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;
  delete Map;

  // Test User-defined arbitrary distribution constructor
  // Generate Global Element List.  Do in reverse for fun!
  long long * MyGlobalElements = new long long[NumMyElements];
  long long MaxMyGID = (Comm.MyPID()+1)*NumMyElements-1+IndexBase;
  if (Comm.MyPID()>2) MaxMyGID+=3;
  for (int i = 0; i<NumMyElements; i++) MyGlobalElements[i] = MaxMyGID-i;

  Map = new Epetra_Map(NumGlobalElements, NumMyElements, MyGlobalElements,
                       IndexBase, Comm);
  if (verbose) cout << "Checking Epetra_Map(NumGlobalElements, NumMyElements, MyGlobalElements, IndexBase, Comm)" << endl;
  ierr = checkmap(*Map, NumGlobalElements, NumMyElements, MyGlobalElements,
                  IndexBase, Comm, DistributedGlobal);
  EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;

  // Test Copy constructor
  Epetra_Map* Map1 = new Epetra_Map(*Map);

  // Test SameAs() method
  bool same = Map1->SameAs(*Map);
  EPETRA_TEST_ERR(!(same==true),ierr); // should return true since Map1 is a copy of Map

  Epetra_BlockMap* Map2 = new Epetra_Map(NumGlobalElements, NumMyElements, MyGlobalElements, IndexBase, Comm);
  same = Map2->SameAs(*Map);
  EPETRA_TEST_ERR(!(same==true),ierr); // Map and Map2 were created with the same sets of parameters
  delete Map2;

  // now test SameAs() on a map that is different
  Map2 = new Epetra_Map(NumGlobalElements, NumMyElements, MyGlobalElements, IndexBase-1, Comm);
  same = Map2->SameAs(*Map);
  EPETRA_TEST_ERR(!(same==false),ierr); // IndexBases are different
  delete Map2;

  // Back to testing copy constructor
  if (verbose) cout << "Checking Epetra_Map(*Map)" << endl;
  ierr = checkmap(*Map1, NumGlobalElements, NumMyElements, MyGlobalElements,
                  IndexBase, Comm, DistributedGlobal);
  EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;

  Epetra_Map* SmallMap = 0;
  if (verbose1) {
    // Build a small map for test cout.  Use 10 elements from current map
    long long* MyEls = Map->MyGlobalElements64();
    long long IndBase = Map->IndexBase64();
    int MyLen = EPETRA_MIN(10+Comm.MyPID(),Map->NumMyElements());
    SmallMap = new Epetra_Map((long long)-1, MyLen, MyEls, IndBase, Comm);
  }

  delete [] MyGlobalElements;
  delete Map;
  delete Map1;

  // Test reference-counting in Epetra_Map
  if (verbose) cout << "Checking Epetra_Map reference counting" << endl;
  ierr = checkMapDataClass(Comm, verbose);
  EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;

  // Test LocalMap constructor
  Epetra_LocalMap* LocalMap = new Epetra_LocalMap((long long)NumMyElements1, IndexBase, Comm);
  if (verbose) cout << "Checking Epetra_LocalMap(NumMyElements1, IndexBase, Comm)" << endl;
  ierr = checkmap(*LocalMap, NumMyElements1, NumMyElements1, 0, IndexBase, Comm, false);
  EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;

  // Test Copy constructor
  Epetra_LocalMap* LocalMap1 = new Epetra_LocalMap(*LocalMap);
  if (verbose) cout << "Checking Epetra_LocalMap(*LocalMap)" << endl;
  ierr = checkmap(*LocalMap1, NumMyElements1, NumMyElements1, 0, IndexBase, Comm, false);
  EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;
  delete LocalMap1;
  delete LocalMap;

  // Test reference-counting in Epetra_LocalMap
  if (verbose) cout << "Checking Epetra_LocalMap reference counting" << endl;
  ierr = checkLocalMapDataClass(Comm, verbose);
  EPETRA_TEST_ERR(ierr,returnierr);
  if (verbose && ierr==0) cout << "Checked OK\n\n" <<endl;

  // Test output
  if (verbose1) {
    if (verbose) cout << "Test ostream << operator" << endl << flush;
    cout << *SmallMap;
    delete SmallMap;
  }

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return returnierr;
}
template <typename int_type, class MapType1, class MapType2>
int TLowCommunicationMakeColMapAndReindex(int N, const int * rowptr, int * colind_LID,
                                          const int_type *colind_GID, const Epetra_Map& domainMap,
                                          const int * owningPIDs,
                                          bool SortGhostsAssociatedWithEachProcessor,
                                          std::vector<int>& RemotePIDs, MapType1 & NewColMap)
{
  int i,j;

  // Sanity checks
  bool UseLL;
  if(domainMap.GlobalIndicesLongLong()) UseLL=true;
  else if(domainMap.GlobalIndicesInt()) UseLL=false;
  else throw std::runtime_error("LowCommunicationMakeColMapAndReindex: cannot detect int type.");

  // Scan all column indices and sort into two groups:
  // Local:  those whose GID matches a GID of the domain map on this processor and
  // Remote: All others.
  int numDomainElements = domainMap.NumMyElements();
  bool * LocalGIDs = 0;
  if (numDomainElements>0) LocalGIDs = new bool[numDomainElements];
  for (i=0; i<numDomainElements; i++) LocalGIDs[i] = false; // Assume domain GIDs are not local

  bool DoSizes = !domainMap.ConstantElementSize(); // If not constant element size, then error
  if(DoSizes) throw std::runtime_error("LowCommunicationMakeColMapAndReindex: cannot handle non-constant sized domainMap.");

  // In principle it is good to have RemoteGIDs and RemoteGIDList be as long as the number of remote GIDs
  // on this processor, but this would require two passes through the column IDs, so we make it the max of 100
  // and the number of block rows.
  const int numMyBlockRows = N;
  int hashsize = numMyBlockRows;
  if (hashsize < 100) hashsize = 100;
  Epetra_HashTable<int_type> RemoteGIDs(hashsize);
  std::vector<int_type> RemoteGIDList; RemoteGIDList.reserve(hashsize);
  std::vector<int> PIDList;            PIDList.reserve(hashsize);

  // Here we start using the *int* colind array.  If int_type==int this clobbers the GIDs, if
  // int_type==long long, then this is the first use of the colind array.
  // For *local* GIDs set colind with their LID in the domainMap.  For *remote* GIDs,
  // we set colind with (numDomainElements+NumRemoteColGIDs) before the increment of
  // the remote count.  These numberings will be separate because no local LID is greater
  // than numDomainElements.
  int NumLocalColGIDs = 0;
  int NumRemoteColGIDs = 0;
  for(i = 0; i < numMyBlockRows; i++) {
    for(j = rowptr[i]; j < rowptr[i+1]; j++) {
      int_type GID = colind_GID[j];
      // Check if GID matches a row GID
      int LID = domainMap.LID(GID);
      if(LID != -1) {
        bool alreadyFound = LocalGIDs[LID];
        if (!alreadyFound) {
          LocalGIDs[LID] = true; // There is a column in the graph associated with this domain map GID
          NumLocalColGIDs++;
        }
        colind_LID[j] = LID;
      }
      else {
        int_type hash_value=RemoteGIDs.Get(GID);
        if(hash_value == -1) { // This means it's a new remote GID
          int PID = owningPIDs[j];
          if(PID==-1) throw std::runtime_error("LowCommunicationMakeColMapAndReindex: Cannot figure out if PID is owned.");
          colind_LID[j] = numDomainElements + NumRemoteColGIDs;
          RemoteGIDs.Add(GID, NumRemoteColGIDs);
          RemoteGIDList.push_back(GID);
          PIDList.push_back(PID);
          NumRemoteColGIDs++;
        }
        else
          colind_LID[j] = numDomainElements + hash_value;
      }
    }
  }

  // Possible short-circuit:  If all domain map GIDs are present as column indices, then set ColMap=domainMap and quit
  if (domainMap.Comm().NumProc()==1) {
    if (NumRemoteColGIDs!=0) {
      throw std::runtime_error("Some column IDs are not in domainMap.  If matrix is rectangular, you must pass in a domainMap");
      // Sanity test: When one processor, there can be no remoteGIDs
    }
    if (NumLocalColGIDs==numDomainElements) {
      if (LocalGIDs!=0) delete [] LocalGIDs;
      // In this case, we just use the domainMap's indices, which is, not coincidentally,
      // what we clobbered colind with up above anyway.  No further reindexing is needed.
      NewColMap = domainMap;
      return 0;
    }
  }

  // Now build integer array containing column GIDs
  // Build back end, containing remote GIDs, first
  int numMyBlockCols = NumLocalColGIDs + NumRemoteColGIDs;
  std::vector<int_type> ColIndices;
  int_type * RemoteColIndices=0;
  if(numMyBlockCols > 0) {
    ColIndices.resize(numMyBlockCols);
    if(NumLocalColGIDs!=numMyBlockCols) RemoteColIndices = &ColIndices[NumLocalColGIDs]; // Points to back end of ColIndices
    else RemoteColIndices=0;
  }

  for(i = 0; i < NumRemoteColGIDs; i++)
    RemoteColIndices[i] = RemoteGIDList[i];

  // Build permute array for *remote* reindexing.
  std::vector<int> RemotePermuteIDs(NumRemoteColGIDs);
  for(i=0; i<NumRemoteColGIDs; i++) RemotePermuteIDs[i]=i;

  // Sort External column indices so that all columns coming from a given remote processor are contiguous
  int NumListsInt=0;
  int NumListsLL =0;
  int * IntSortLists[2];
  long long * LLSortLists[2];
  int * RemotePermuteIDs_ptr = RemotePermuteIDs.size() ? &RemotePermuteIDs[0] : 0;
  if(!UseLL) {
    // int version
    IntSortLists[0] = (int*) RemoteColIndices;
    IntSortLists[1] = RemotePermuteIDs_ptr;
    NumListsInt=2;
  }
  else {
    // LL version
    LLSortLists[0]  = (long long*) RemoteColIndices;
    IntSortLists[0] = RemotePermuteIDs_ptr;
    NumListsInt = NumListsLL = 1;
  }

  int * PIDList_ptr = PIDList.size() ? &PIDList[0] : 0;
  Epetra_Util::Sort(true, NumRemoteColGIDs, PIDList_ptr, 0, 0, NumListsInt, IntSortLists, NumListsLL, LLSortLists);

  // Stash the RemotePIDs
  PIDList.resize(NumRemoteColGIDs);
  RemotePIDs = PIDList;

  if (SortGhostsAssociatedWithEachProcessor) {
    // Sort external column indices so that columns from a given remote processor are not only contiguous
    // but also in ascending order. NOTE: I don't know if the number of externals associated
    // with a given remote processor is known at this point ... so I count them here.

    // NTS: Only sort the RemoteColIndices this time...
    int StartCurrent, StartNext;
    StartCurrent = 0; StartNext = 1;
    while ( StartNext < NumRemoteColGIDs ) {
      if (PIDList[StartNext]==PIDList[StartNext-1]) StartNext++;
      else {
        IntSortLists[0] = &RemotePermuteIDs[StartCurrent];
        Epetra_Util::Sort(true, StartNext-StartCurrent, &(RemoteColIndices[StartCurrent]), 0, 0, 1, IntSortLists, 0, 0);
        StartCurrent = StartNext; StartNext++;
      }
    }
    IntSortLists[0] = &RemotePermuteIDs[StartCurrent];
    Epetra_Util::Sort(true, StartNext-StartCurrent, &(RemoteColIndices[StartCurrent]), 0, 0, 1, IntSortLists, 0, 0);
  }

  // Reverse the permutation to get the information we actually care about
  std::vector<int> ReverseRemotePermuteIDs(NumRemoteColGIDs);
  for(i=0; i<NumRemoteColGIDs; i++) ReverseRemotePermuteIDs[RemotePermuteIDs[i]]=i;

  // Build permute array for *local* reindexing.
  bool use_local_permute=false;
  std::vector<int> LocalPermuteIDs(numDomainElements);

  // Now fill front end. Two cases:
  // (1) If the number of Local column GIDs is the same as the number of Local domain GIDs, we
  //     can simply read the domain GIDs into the front part of ColIndices, otherwise
  // (2) We step through the GIDs of the domainMap, checking to see if each domain GID is a column GID.
  //     we want to do this to maintain a consistent ordering of GIDs between the columns and the domain.
  if(NumLocalColGIDs == domainMap.NumMyElements()) {
    if(NumLocalColGIDs > 0) {
      domainMap.MyGlobalElements(&ColIndices[0]); // Load Global Indices into first numMyBlockCols elements column GID list
    }
  }
  else {
    int_type* MyGlobalElements = 0;
    domainMap.MyGlobalElementsPtr(MyGlobalElements);

    int* ElementSizeList = 0;
    if(DoSizes)
      ElementSizeList = domainMap.ElementSizeList();

    int NumLocalAgain = 0;
    use_local_permute = true;
    for(i = 0; i < numDomainElements; i++) {
      if(LocalGIDs[i]) {
        LocalPermuteIDs[i] = NumLocalAgain;
        ColIndices[NumLocalAgain++] = MyGlobalElements[i];
      }
    }
    assert(NumLocalAgain==NumLocalColGIDs); // Sanity test
  }

  // Done with this array
  if (LocalGIDs!=0) delete [] LocalGIDs;

  // Make Column map with same element sizes as Domain map
  int_type * ColIndices_ptr = ColIndices.size() ? &ColIndices[0] : 0;
  MapType2 temp((int_type)(-1), numMyBlockCols, ColIndices_ptr, (int_type)domainMap.IndexBase64(), domainMap.Comm());
  NewColMap = temp;

  // Low-cost reindex of the matrix
  for(i=0; i<numMyBlockRows; i++){
    for(j=rowptr[i]; j<rowptr[i+1]; j++){
      int ID=colind_LID[j];
      if(ID < numDomainElements){
        if(use_local_permute) colind_LID[j] = LocalPermuteIDs[colind_LID[j]];
        // In the case where use_local_permute==false, we just copy the DomainMap's ordering, which it so happens
        // is what we put in colind to begin with.
      }
      else
        colind_LID[j] = NumLocalColGIDs + ReverseRemotePermuteIDs[colind_LID[j]-numDomainElements];
    }
  }

  return 0;
}
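// Illustrative sketch (not part of the original source): calling the helper above
// on a tiny single-processor CRS structure with int GIDs.  The explicit template
// arguments <int, Epetra_Map, Epetra_Map> and the concrete data are assumptions.
// In serial, with every domain GID present as a column, the short-circuit branch
// fires, NewColMap becomes a copy of domainMap, and colind_LID holds 0..3.
static void exampleMakeColMap(const Epetra_Comm & comm) {
  if (comm.NumProc() != 1) return;            // keep the sketch strictly serial
  Epetra_Map domainMap(4, 0, comm);           // 4 domain GIDs, index base 0
  int rowptr[]     = {0, 2, 4};               // 2 local rows, 2 entries each
  int colind_GID[] = {0, 1, 2, 3};            // global column ids (all local here)
  int colind_LID[4];                          // overwritten with local column ids
  int owningPIDs[] = {0, 0, 0, 0};            // every entry owned by this processor
  std::vector<int> RemotePIDs;
  Epetra_Map NewColMap(0, 0, comm);           // placeholder, reassigned by the call
  TLowCommunicationMakeColMapAndReindex<int, Epetra_Map, Epetra_Map>(
      2, rowptr, colind_LID, colind_GID, domainMap, owningPIDs,
      /*SortGhostsAssociatedWithEachProcessor=*/true, RemotePIDs, NewColMap);
}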
template<typename int_type>
static Epetra_Map TCreate_Root_Map(const Epetra_Map& usermap, int root)
{
  int numProc = usermap.Comm().NumProc();
  if (numProc==1) {
    Epetra_Map newmap(usermap);
    return(newmap);
  }

  const Epetra_Comm & comm = usermap.Comm();
  bool isRoot = usermap.Comm().MyPID()==root;

  // if usermap is already completely owned by root then we'll just return a copy of it.
  int quickreturn = 0;
  int globalquickreturn = 0;

  if (isRoot) {
    if (usermap.NumMyElements()==usermap.NumGlobalElements64()) quickreturn = 1;
  }
  else {
    if (usermap.NumMyElements()==0) quickreturn = 1;
  }
  usermap.Comm().MinAll(&quickreturn, &globalquickreturn, 1);

  if (globalquickreturn==1) {
    Epetra_Map newmap(usermap);
    return(newmap);
  }

  // Linear map: Simple case, just put all GIDs linearly on root processor
  if (usermap.LinearMap() && root!=-1) {
    int numMyElements = 0;
    if(usermap.MaxAllGID64()+1 > std::numeric_limits<int>::max())
      throw "Epetra_Util::Create_Root_Map: cannot fit all gids in int";
    if (isRoot) numMyElements = (int)(usermap.MaxAllGID64()+1);
    Epetra_Map newmap((int_type) -1, numMyElements, (int_type)usermap.IndexBase64(), comm);
    return(newmap);
  }

  if (!usermap.UniqueGIDs())
    throw usermap.ReportError("usermap must have unique GIDs",-1);

  // General map

  // Build IntVector of the GIDs, then ship them to root processor
  int numMyElements = usermap.NumMyElements();
  Epetra_Map allGidsMap((int_type) -1, numMyElements, (int_type) 0, comm);
  typename Epetra_GIDTypeVector<int_type>::impl allGids(allGidsMap);
  for (int i=0; i<numMyElements; i++) allGids[i] = (int_type) usermap.GID64(i);

  if(usermap.MaxAllGID64() > std::numeric_limits<int>::max())
    throw "Epetra_Util::Create_Root_Map: cannot fit all gids in int";
  int numGlobalElements = (int) usermap.NumGlobalElements64();
  if (root!=-1) {
    int n1 = 0;
    if (isRoot) n1 = numGlobalElements;
    Epetra_Map allGidsOnRootMap((int_type) -1, n1, (int_type) 0, comm);
    Epetra_Import importer(allGidsOnRootMap, allGidsMap);
    typename Epetra_GIDTypeVector<int_type>::impl allGidsOnRoot(allGidsOnRootMap);
    allGidsOnRoot.Import(allGids, importer, Insert);

    Epetra_Map rootMap((int_type)-1, allGidsOnRoot.MyLength(), allGidsOnRoot.Values(), (int_type)usermap.IndexBase64(), comm);
    return(rootMap);
  }
  else {
    int n1 = numGlobalElements;
    Epetra_LocalMap allGidsOnRootMap((int_type) n1, (int_type) 0, comm);
    Epetra_Import importer(allGidsOnRootMap, allGidsMap);
    typename Epetra_GIDTypeVector<int_type>::impl allGidsOnRoot(allGidsOnRootMap);
    allGidsOnRoot.Import(allGids, importer, Insert);

    Epetra_Map rootMap((int_type) -1, allGidsOnRoot.MyLength(), allGidsOnRoot.Values(), (int_type)usermap.IndexBase64(), comm);
    return(rootMap);
  }
}
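// Illustrative sketch (not part of the original source): gathering a distributed
// map onto processor 0 with the helper above, assuming distMap uses 64-bit global
// indices.  In the library a public Epetra_Util::Create_Root_Map wrapper would
// normally dispatch to this template.
static void exampleGatherToRoot(const Epetra_Map & distMap) {
  // All GIDs of distMap end up on PE 0 (the root); every other PE gets an empty map.
  Epetra_Map rootMap = TCreate_Root_Map<long long>(distMap, 0);

  // A typical follow-up: import a distributed vector onto the root map.
  Epetra_Import importer(rootMap, distMap);
  Epetra_Vector x(distMap), xRoot(rootMap);
  xRoot.Import(x, importer, Insert);
}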
// ============================================================================
void EpetraExt::XMLWriter::
Write(const std::string& Label, const Epetra_Map& Map)
{
  TEUCHOS_TEST_FOR_EXCEPTION(IsOpen_ == false, std::logic_error,
                             "No file has been opened");

  long long NumGlobalElements = Map.NumGlobalElements64();

  const int* MyGlobalElements_int = 0;
  const long long* MyGlobalElements_LL = 0;
  Map.MyGlobalElements(MyGlobalElements_int, MyGlobalElements_LL);

  // Exactly one of the two pointers is set, depending on whether the map uses
  // int or long long global indices; error out only if neither is available.
  if(!MyGlobalElements_int && !MyGlobalElements_LL)
    throw "EpetraExt::XMLWriter::Write: ERROR, GlobalIndices type unknown.";

  if (Comm_.MyPID() == 0) {
    std::ofstream of(FileName_.c_str(), std::ios::app);

    of << "<Map Label=\"" << Label
       << "\" NumElements=\"" << NumGlobalElements << '"'
       << " IndexBase=\"" << Map.IndexBase64() << '"'
       << " NumProc=\"" << Comm_.NumProc() << '"';

    of.close();
  }

  for (int iproc = 0; iproc < Comm_.NumProc(); ++iproc) {
    if (iproc == Comm_.MyPID()) {
      std::ofstream of(FileName_.c_str(), std::ios::app);
      of << " ElementsOnProc" << iproc << "=\"" << Map.NumMyElements() << '"';
      of.close();
    }
    Comm_.Barrier();
  }

  if (Comm_.MyPID() == 0) {
    std::ofstream of(FileName_.c_str(), std::ios::app);
    of << '>' << std::endl;
    of.close();
  }

  for (int iproc = 0; iproc < Comm_.NumProc(); iproc++) {
    if (iproc == Comm_.MyPID()) {
      std::ofstream of(FileName_.c_str(), std::ios::app);

      of << "<Proc ID=\"" << Comm_.MyPID() << "\">" << std::endl;

      if(MyGlobalElements_int) {
        for (int i = 0; i < Map.NumMyElements(); ++i) {
          of << MyGlobalElements_int[i] << std::endl;
        }
      }
      else {
        for (int i = 0; i < Map.NumMyElements(); ++i) {
          of << MyGlobalElements_LL[i] << std::endl;
        }
      }

      of << "</Proc>" << std::endl;
      of.close();
    }
    Comm_.Barrier();
  }

  if (Comm_.MyPID() == 0) {
    std::ofstream of(FileName_.c_str(), std::ios::app);
    of << "</Map>" << std::endl;
    of.close();
  }
}
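// Illustrative sketch (not part of the original source): writing a map with the
// XML writer above.  Assumes the usual EpetraExt::XMLWriter life cycle of
// Create()/Write()/Close(); the file name and labels are arbitrary examples.
static void exampleWriteMapXML(const Epetra_Comm & comm, const Epetra_Map & map) {
  EpetraExt::XMLWriter writer(comm, "map.xml"); // file name is an example
  writer.Create("Example problem");             // opens the file and writes the header
  writer.Write("MyMap", map);                   // emits the <Map> block produced above
  writer.Close();                               // writes the trailer and closes the file
}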