//=========================================================================
int Epetra_LinearProblemRedistor::UpdateRedistProblemValues(Epetra_LinearProblem * ProblemWithNewValues) {

  if (!RedistProblemCreated_) EPETRA_CHK_ERR(-1);  // This method can only be called after CreateRedistProblem()

  Epetra_RowMatrix * OrigMatrix = ProblemWithNewValues->GetMatrix();
  Epetra_MultiVector * OrigLHS = ProblemWithNewValues->GetLHS();
  Epetra_MultiVector * OrigRHS = ProblemWithNewValues->GetRHS();

  if (OrigMatrix==0) EPETRA_CHK_ERR(-2); // There is no matrix associated with this Problem

  Epetra_CrsMatrix * RedistMatrix = dynamic_cast<Epetra_CrsMatrix *>(RedistProblem_->GetMatrix());

  // Check if the transpose should be created or not
  if (ConstructTranspose_) {
    EPETRA_CHK_ERR(Transposer_->UpdateTransposeValues(OrigMatrix));
  }
  else {
    // If not, then just do the redistribution based on the RedistMap
    EPETRA_CHK_ERR(RedistMatrix->PutScalar(0.0));
    // Need to do this next step until we generalize the Import/Export ops for CrsMatrix
    Epetra_CrsMatrix * OrigCrsMatrix = dynamic_cast<Epetra_CrsMatrix *>(OrigMatrix);
    if (OrigCrsMatrix==0) EPETRA_CHK_ERR(-3); // Broken for a RowMatrix at this point
    EPETRA_CHK_ERR(RedistMatrix->Export(*OrigCrsMatrix, *RedistExporter_, Add));
  }

  // Now redistribute the RHS and LHS if non-zero
  if (OrigLHS!=0) {
    EPETRA_CHK_ERR(RedistProblem_->GetLHS()->Export(*OrigLHS, *RedistExporter_, Add));
  }
  if (OrigRHS!=0) {
    EPETRA_CHK_ERR(RedistProblem_->GetRHS()->Export(*OrigRHS, *RedistExporter_, Add));
  }

  return(0);
}
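A minimal usage sketch of the update path (illustrative only, not taken from the file above): it assumes a `redistor` of type Epetra_LinearProblemRedistor already constructed from `origProblem` (constructor arguments omitted), and that only matrix and vector values, not the sparsity structure, change between solves. The names `redistor`, `origProblem`, and `redistProblem` are hypothetical.

// Hypothetical usage sketch for CreateRedistProblem() + UpdateRedistProblemValues().
Epetra_LinearProblem * redistProblem = 0;

// One-time setup: build the redistributed problem (no transpose, contiguous data).
int ierr = redistor.CreateRedistProblem(false, true, redistProblem);
if (ierr < 0) { /* handle error */ }

// ... solve redistProblem with a solver of your choice ...

// Later, the values (but not the structure) of the original problem change:
ierr = redistor.UpdateRedistProblemValues(&origProblem);
if (ierr < 0) { /* handle error */ }

// redistProblem now holds the updated, redistributed values and can be solved again.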
int MyCreateCrsMatrix( char *in_filename, const Epetra_Comm &Comm,
                       Epetra_Map *& readMap,
                       const bool transpose,
                       const bool distribute,
                       bool& symmetric,
                       Epetra_CrsMatrix *& Matrix ) {

  Epetra_CrsMatrix * readA = 0;
  Epetra_Vector * readx = 0;
  Epetra_Vector * readb = 0;
  Epetra_Vector * readxexact = 0;

  //
  //  This hack allows TestOptions to be run from either the test/TestOptions/ directory or from
  //  the test/ directory (as it is in nightly testing and in make "run-tests")
  //
  FILE *in_file = fopen( in_filename, "r");

  char *filename;
  if (in_file == NULL )
    filename = &in_filename[1] ;  // Strip off the "." from "../" and try again
  else {
    filename = in_filename ;
    fclose( in_file );
  }

  symmetric = false ;
  std::string FileName = filename ;
  int FN_Size = FileName.size() ;
  std::string LastFiveBytes = FileName.substr( EPETRA_MAX(0,FN_Size-5), FN_Size );
  std::string LastFourBytes = FileName.substr( EPETRA_MAX(0,FN_Size-4), FN_Size );

  if ( LastFiveBytes == ".triU" ) {
    // Call routine to read in unsymmetric Triplet matrix
    EPETRA_CHK_ERR( Trilinos_Util_ReadTriples2Epetra( filename, false, Comm, readMap, readA,
                                                      readx, readb, readxexact) );
    symmetric = false;
  } else if ( LastFiveBytes == ".triS" ) {
    // Call routine to read in symmetric Triplet matrix
    EPETRA_CHK_ERR( Trilinos_Util_ReadTriples2Epetra( filename, true, Comm, readMap, readA,
                                                      readx, readb, readxexact) );
    symmetric = true;
  } else if ( LastFourBytes == ".mtx" ) {
    // Call routine to read in a Matrix Market file
    EPETRA_CHK_ERR( Trilinos_Util_ReadMatrixMarket2Epetra( filename, Comm, readMap, readA,
                                                           readx, readb, readxexact) );
    FILE* in_file = fopen( filename, "r");
    assert (in_file != NULL) ;  // Checked in Trilinos_Util_CountMatrixMarket()
    const int BUFSIZE = 800 ;
    char buffer[BUFSIZE] ;
    fgets( buffer, BUFSIZE, in_file ) ;  // Pick symmetry info off of this string
    std::string headerline1 = buffer;
#ifdef TFLOP
    if ( headerline1.find("symmetric") < BUFSIZE ) symmetric = true;
#else
    if ( headerline1.find("symmetric") != std::string::npos) symmetric = true;
#endif
    fclose(in_file);
  } else {
    // Call routine to read in HB problem
    Trilinos_Util_ReadHb2Epetra( filename, Comm, readMap, readA, readx, readb, readxexact) ;
    if ( LastFourBytes == ".rsa" ) symmetric = true ;
  }

  if ( readb ) delete readb;
  if ( readx ) delete readx;
  if ( readxexact ) delete readxexact;

  Epetra_CrsMatrix *serialA ;
  Epetra_CrsMatrix *transposeA;

  if ( transpose ) {
    transposeA = new Epetra_CrsMatrix( Copy, *readMap, 0 );
    assert( CrsMatrixTranspose( readA, transposeA ) == 0 );
    serialA = transposeA ;
    delete readA;
    readA = 0 ;
  } else {
    serialA = readA ;
  }

  assert( (void *) &serialA->Graph() ) ;
  assert( (void *) &serialA->RowMap() ) ;
  assert( serialA->RowMap().SameAs(*readMap) ) ;

  if ( distribute ) {
    // Create uniform distributed map
    Epetra_Map DistMap(readMap->NumGlobalElements(), 0, Comm);

    // Create Exporter to distribute read-in matrix and vectors
    Epetra_Export exporter( *readMap, DistMap );

    Epetra_CrsMatrix *Amat = new Epetra_CrsMatrix( Copy, DistMap, 0 );
    Amat->Export(*serialA, exporter, Add);
    assert(Amat->FillComplete()==0);

    Matrix = Amat;
    //
    //  Make sure that deleting Amat->RowMap() will delete map
    //
    //  Bug:  We can't manage to delete map this way anyway,
    //        and this fails on transposes, so for now I just accept
    //        the memory loss.
    //    assert( &(Amat->RowMap()) == map ) ;
    delete readMap;
    readMap = 0 ;
    delete serialA;
  } else {
    Matrix = serialA;
  }

  return 0;
}
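A hedged call sketch for the routine above; the file name and variable names are illustrative, and `Comm` is assumed to be an Epetra_Comm already in scope.

// Illustrative call sketch: read "A.mtx" without transposing and distribute its rows.
Epetra_Map * readMap = 0;
Epetra_CrsMatrix * A = 0;
bool symmetric = false;
int info = MyCreateCrsMatrix((char *) "A.mtx", Comm, readMap, false, true, symmetric, A);
assert(info == 0);
// When distribute == true, MyCreateCrsMatrix deletes readMap and sets it to 0;
// A then owns its own uniformly distributed row map.
// ... use A ...
delete A;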
//=========================================================================
int Epetra_LinearProblemRedistor::CreateRedistProblem(const bool ConstructTranspose,
                                                      const bool MakeDataContiguous,
                                                      Epetra_LinearProblem *& RedistProblem) {

  if (RedistProblemCreated_) EPETRA_CHK_ERR(-1);  // This method can only be called once

  Epetra_RowMatrix * OrigMatrix = OrigProblem_->GetMatrix();
  Epetra_MultiVector * OrigLHS = OrigProblem_->GetLHS();
  Epetra_MultiVector * OrigRHS = OrigProblem_->GetRHS();

  if (OrigMatrix==0) EPETRA_CHK_ERR(-2); // There is no matrix associated with this Problem

  if (RedistMap_==0) {
    EPETRA_CHK_ERR(GenerateRedistMap());
  }

  RedistExporter_ = new Epetra_Export(OrigProblem_->GetMatrix()->RowMatrixRowMap(), *RedistMap_);

  RedistProblem_ = new Epetra_LinearProblem();
  Epetra_CrsMatrix * RedistMatrix;

  // Check if the transpose should be created or not
  if (ConstructTranspose) {
    Transposer_ = new Epetra_RowMatrixTransposer(OrigMatrix);
    EPETRA_CHK_ERR(Transposer_->CreateTranspose(MakeDataContiguous, RedistMatrix, RedistMap_));
  }
  else {
    // If not, then just do the redistribution based on the RedistMap
    RedistMatrix = new Epetra_CrsMatrix(Copy, *RedistMap_, 0);
    // Need to do this next step until we generalize the Import/Export ops for CrsMatrix
    Epetra_CrsMatrix * OrigCrsMatrix = dynamic_cast<Epetra_CrsMatrix *>(OrigMatrix);
    EPETRA_CHK_ERR(RedistMatrix->Export(*OrigCrsMatrix, *RedistExporter_, Add));
    EPETRA_CHK_ERR(RedistMatrix->FillComplete());
  }

  RedistProblem_->SetOperator(RedistMatrix);

  // Now redistribute the RHS and LHS if non-zero
  Epetra_MultiVector * RedistLHS = 0;
  Epetra_MultiVector * RedistRHS = 0;

  int ierr = 0;

  if (OrigLHS!=0) {
    RedistLHS = new Epetra_MultiVector(*RedistMap_, OrigLHS->NumVectors());
    EPETRA_CHK_ERR(RedistLHS->Export(*OrigLHS, *RedistExporter_, Add));
  }
  else ierr = 1;

  if (OrigRHS!=0) {
    RedistRHS = new Epetra_MultiVector(*RedistMap_, OrigRHS->NumVectors());
    EPETRA_CHK_ERR(RedistRHS->Export(*OrigRHS, *RedistExporter_, Add));
  }
  else ierr ++;

  RedistProblem_->SetLHS(RedistLHS);
  RedistProblem_->SetRHS(RedistRHS);

  RedistProblemCreated_ = true;
  RedistProblem = RedistProblem_;  // Return the redistributed problem to the caller

  return(ierr);
}
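A sketch of the create call with the transpose option enabled, again with hypothetical names (`redistor`, `redistProblem`); the nonzero return values follow the `ierr` logic above.

// Illustrative sketch: build a transposed, redistributed copy of an existing problem.
Epetra_LinearProblem * redistProblem = 0;
int ierr = redistor.CreateRedistProblem(true,   // ConstructTranspose
                                        true,   // MakeDataContiguous
                                        redistProblem);
if (ierr < 0) { /* hard error */ }
else if (ierr > 0) {
  // ierr is 1 or 2: the original problem had no LHS and/or no RHS,
  // so the corresponding redistributed vector pointers are null.
}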
void example (const Epetra_Comm& comm) {
  // The global number of rows in the matrix A to create.  We scale
  // this relative to the number of (MPI) processes, so that no matter
  // how many MPI processes you run, every process will have 10 rows.
  const global_ordinal_type numGblElts = 10 * comm.NumProc ();
  // The global minimum index (index base) of all the Maps here.
  const global_ordinal_type indexBase = 0;

  // Local error code for use below.
  //
  // In the ideal case, we would use this to emulate behavior like
  // that of Haskell's Maybe in the context of MPI.  That is, if one
  // process experiences an error, we don't want to abort early and
  // cause the other processes to deadlock on MPI communication
  // operations.  Rather, we want to carry along the local error state
  // until we reach a point where it's natural to share that state
  // with other processes.  For example, if one is doing an
  // MPI_Allreduce anyway, it makes sense to pass along one more bit
  // of information: whether the calling process is in a local error
  // state.  Epetra's interface doesn't let one chain the local error
  // state in this way, so we use extra collectives below to propagate
  // that state.  The code below uses very conservative error checks;
  // typical user code would not need to be so conservative and could
  // therefore avoid all the all-reduces.
  int lclerr = 0;

  // Construct a Map that is global (not locally replicated), but puts
  // all the equations on MPI Proc 0.
  const int procZeroMapNumLclElts = (comm.MyPID () == 0) ?
    numGblElts :
    static_cast<global_ordinal_type> (0);
  Epetra_Map procZeroMap (numGblElts, procZeroMapNumLclElts, indexBase, comm);

  // Construct a Map that puts approximately the same number of
  // equations on each processor.
  Epetra_Map globalMap (numGblElts, indexBase, comm);

  // Create a sparse matrix using procZeroMap.
  Epetra_CrsMatrix* A = createCrsMatrix (procZeroMap);
  if (A == NULL) {
    lclerr = 1;
  }

  // Make sure that sparse matrix creation succeeded.  Normally you
  // don't have to check this; we are being extra conservative because
  // this example is also a test.  Even though the matrix's rows live
  // entirely on Process 0, the matrix is nonnull on all processes in
  // its Map's communicator.
  int gblerr = 0;
  (void) comm.MaxAll (&lclerr, &gblerr, 1);
  if (gblerr != 0) {
    throw std::runtime_error ("createCrsMatrix returned NULL on at least one "
                              "process.");
  }

  //
  // We've created a sparse matrix whose rows live entirely on MPI
  // Process 0.  Now we want to distribute it over all the processes.
  //

  // Redistribute the matrix.  Since both the source and target Maps
  // are one-to-one, we could use either an Import or an Export.  If
  // only the source Map were one-to-one, we would have to use an
  // Import; if only the target Map were one-to-one, we would have to
  // use an Export.  We do not allow redistribution using Import or
  // Export if neither source nor target Map is one-to-one.

  // Make an Export object with procZeroMap as the source Map and
  // globalMap as the target Map.  An Export does not depend on the
  // type of data it redistributes; you can reuse the same Export for
  // different Epetra objects (e.g., an Epetra_CrsMatrix and an
  // Epetra_MultiVector) as long as they use the same source and
  // target Maps.
  Epetra_Export exporter (procZeroMap, globalMap);

  // Make a new sparse matrix whose row map is the global Map.
  Epetra_CrsMatrix B (Copy, globalMap, 0);

  // Redistribute the data, NOT in place, from matrix A (which lives
  // entirely on Proc 0) to matrix B (which is distributed evenly over
  // the processes).
  //
  // Export() has collective semantics, so we must always call it on
  // all processes collectively.  This is why we don't select on
  // lclerr, as we do for the local operations above.
  lclerr = B.Export (*A, exporter, Insert);

  // Make sure that the Export succeeded.  Normally you don't have to
  // check this; we are being extra conservative because this example
  // is also a test.  We test both min and max, since lclerr may be
  // negative, zero, or positive.
  gblerr = 0;
  (void) comm.MinAll (&lclerr, &gblerr, 1);
  if (gblerr != 0) {
    throw std::runtime_error ("Export() failed on at least one process.");
  }
  (void) comm.MaxAll (&lclerr, &gblerr, 1);
  if (gblerr != 0) {
    throw std::runtime_error ("Export() failed on at least one process.");
  }

  // FillComplete has collective semantics, so we must always call it
  // on all processes collectively.  This is why we don't select on
  // lclerr, as we do for the local operations above.
  lclerr = B.FillComplete ();

  // Make sure that FillComplete succeeded.  Normally you don't have
  // to check this; we are being extra conservative because this
  // example is also a test.  We test both min and max, since lclerr
  // may be negative, zero, or positive.
  gblerr = 0;
  (void) comm.MinAll (&lclerr, &gblerr, 1);
  if (gblerr != 0) {
    throw std::runtime_error ("B.FillComplete() failed on at least one process.");
  }
  (void) comm.MaxAll (&lclerr, &gblerr, 1);
  if (gblerr != 0) {
    throw std::runtime_error ("B.FillComplete() failed on at least one process.");
  }

  if (A != NULL) {
    delete A;
  }
}
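A possible driver for example(), shown only as a sketch; it assumes an MPI build of Epetra (with a serial build, an Epetra_SerialComm could be passed to example() instead).

// Illustrative driver sketch for example(); assumes an MPI build.
#include <iostream>
#include <mpi.h>
#include "Epetra_MpiComm.h"

int main (int argc, char* argv[]) {
  MPI_Init (&argc, &argv);
  {
    // Scope the communicator so it is destroyed before MPI_Finalize.
    Epetra_MpiComm comm (MPI_COMm_WORLD == MPI_COMM_WORLD ? MPI_COMM_WORLD : MPI_COMM_WORLD);
    example (comm);  // Build A on Proc 0, then Export it to the distributed matrix B.
    if (comm.MyPID () == 0) {
      std::cout << "Redistribution example finished." << std::endl;
    }
  }
  MPI_Finalize ();
  return 0;
}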