Example no. 1
void RieszRep::computeRieszRep(int cubatureEnrichment){
#ifdef HAVE_MPI
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
  //cout << "rank: " << rank << " of " << numProcs << endl;
#else
  Epetra_SerialComm Comm;
#endif  

  set<GlobalIndexType> cellIDs = _mesh->cellIDsInPartition();
  for (set<GlobalIndexType>::iterator cellIDIt=cellIDs.begin(); cellIDIt !=cellIDs.end(); cellIDIt++){
    GlobalIndexType cellID = *cellIDIt;

    ElementTypePtr elemTypePtr = _mesh->getElementType(cellID);
    DofOrderingPtr testOrderingPtr = elemTypePtr->testOrderPtr;
    int numTestDofs = testOrderingPtr->totalDofs();

    BasisCachePtr basisCache = BasisCache::basisCacheForCell(_mesh,cellID,true,cubatureEnrichment);

    FieldContainer<double> rhsValues(1,numTestDofs);
    _rhs->integrate(rhsValues, testOrderingPtr, basisCache);
    if (_printAll){
      cout << "RieszRep: LinearTerm values for cell " << cellID << ":\n " << rhsValues << endl;
    }
  
    FieldContainer<double> ipMatrix(1,numTestDofs,numTestDofs);      
    _ip->computeInnerProductMatrix(ipMatrix,testOrderingPtr, basisCache);

    bool printOutRiesz = false;
    if (printOutRiesz){
      cout << " ============================ In RIESZ ==========================" << endl;
      cout << "matrix: \n" << ipMatrix;
    }
    
    FieldContainer<double> rieszRepDofs(numTestDofs,1);
    // drop the leading cell dimension so the dense solver sees a square matrix and a column vector
    ipMatrix.resize(numTestDofs,numTestDofs);
    rhsValues.resize(numTestDofs,1);
    int success = SerialDenseWrapper::solveSystemUsingQR(rieszRepDofs, ipMatrix, rhsValues);
    
    if (success != 0) {
      cout << "RieszRep::computeRieszRep: Solve FAILED with error: " << success << endl;
    }

//    rieszRepDofs.Multiply(true,rhsVectorCopy, normSq); // equivalent to e^T * R_V * e
    double normSquared = SerialDenseWrapper::dot(rieszRepDofs, rhsValues);
    _rieszRepNormSquared[cellID] = normSquared;

//    cout << "normSquared for cell " << cellID << ": " << _rieszRepNormSquared[cellID] << endl;
    
    if (printOutRiesz){
      cout << "rhs: \n" << rhsValues;
      cout << "dofs: \n" << rieszRepDofs;
      cout << " ================================================================" << endl;
    }

    FieldContainer<double> dofs(numTestDofs);
    for (int i = 0;i<numTestDofs;i++){
      dofs(i) = rieszRepDofs(i,0);
    }
    _rieszRepDofs[cellID] = dofs;
  }
  distributeDofs();
  _repsNotComputed = false;
}
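
// The per-cell solve above computes the Riesz representation e of the load
// functional: solve G e = b, where G_ij = (phi_i, phi_j)_V is the test-space
// Gram matrix (ipMatrix) and b_i is the functional applied to test basis
// function phi_i (rhsValues).  The stored quantity is then
// ||r||^2 = e^T G e = e^T b, which is what the dot product computes.
// A minimal, library-free sketch of that last identity (hypothetical helper,
// not part of the RieszRep API):
#include <cstddef>
#include <vector>

double rieszNormSquared(const std::vector<double>& e,   // solution of G e = b
                        const std::vector<double>& b) { // load vector
  double norm2 = 0.0;
  for (std::size_t i = 0; i < e.size(); ++i)
    norm2 += e[i] * b[i];  // e.b equals e^T G e when G e = b
  return norm2;
}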
Example no. 2
//=============================================================================
int Amesos_Mumps::ConvertToTriplet(const bool OnlyValues)
{

  Epetra_RowMatrix* ptr;
  if (Comm().NumProc() == 1)
    ptr = &Matrix();
  else {
    ptr = &RedistrMatrix(true);
  }

  ResetTimer();
  
#ifdef EXTRA_DEBUG_INFO
  Epetra_CrsMatrix* Eptr = dynamic_cast<Epetra_CrsMatrix*>( ptr );
  if ( ptr->NumGlobalNonzeros() < 300 ) SetICNTL(4,3 );  // Enable more debug info for small matrices
  if ( ptr->NumGlobalNonzeros() < 42 && Eptr ) { 
      std::cout << " Matrix = " << std::endl ; 
      Eptr->Print( std::cout ) ; 
  } else {
      assert( Eptr );
  }
#endif

  Row.resize(ptr->NumMyNonzeros());
  Col.resize(ptr->NumMyNonzeros());
  Val.resize(ptr->NumMyNonzeros());

  int MaxNumEntries = ptr->MaxNumEntries();
  std::vector<int> Indices;
  std::vector<double> Values;
  Indices.resize(MaxNumEntries);
  Values.resize(MaxNumEntries);

  int count = 0;

  for (int i = 0; i < ptr->NumMyRows() ; ++i) {

    int GlobalRow = ptr->RowMatrixRowMap().GID(i);

    int NumEntries = 0;
    int ierr;
    ierr = ptr->ExtractMyRowCopy(i, MaxNumEntries,
				   NumEntries, &Values[0],
				   &Indices[0]);
    AMESOS_CHK_ERR(ierr);

    for (int j = 0 ; j < NumEntries ; ++j) {
      if (OnlyValues == false) {
	Row[count] = GlobalRow + 1;
	Col[count] = ptr->RowMatrixColMap().GID(Indices[j]) + 1;
      }
      
      // MS // Added on 15-Mar-05.
      if (AddToDiag_ && Indices[j] == i)
        Values[j] += AddToDiag_;

      Val[count] = Values[j];
      count++;
    }
  }

  MtxConvTime_ = AddTime("Total matrix conversion time", MtxConvTime_);
  
  assert (count <= ptr->NumMyNonzeros());

  return(0);
}
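
// MUMPS consumes coordinate ("triplet") storage with 1-based Fortran-style
// indices, which is why the loop above adds 1 to every global row and column
// ID.  A minimal sketch of the same conversion for a plain CSR matrix
// (hypothetical free function, independent of Amesos):
#include <vector>

void csrToOneBasedTriplets(int nrows, const int* rowptr, const int* colind,
                           const double* vals, std::vector<int>& Row,
                           std::vector<int>& Col, std::vector<double>& Val)
{
  Row.clear(); Col.clear(); Val.clear();
  for (int i = 0; i < nrows; ++i) {
    for (int k = rowptr[i]; k < rowptr[i+1]; ++k) {
      Row.push_back(i + 1);           // 1-based row index
      Col.push_back(colind[k] + 1);   // 1-based column index
      Val.push_back(vals[k]);
    }
  }
}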
Example no. 3
int Amesos_Mumps::Solve()
{ 
  if (IsNumericFactorizationOK_ == false)
    AMESOS_CHK_ERR(NumericFactorization());

  Epetra_MultiVector* vecX = Problem_->GetLHS(); 
  Epetra_MultiVector* vecB = Problem_->GetRHS();

  // check the pointers before dereferencing them
  if ((vecX == 0) || (vecB == 0))
    AMESOS_CHK_ERR(-1);

  int NumVectors = vecX->NumVectors();
  if (NumVectors != vecB->NumVectors())
    AMESOS_CHK_ERR(-1);

  if (Comm().NumProc() == 1) 
  {
    // do not import any data
    for (int j = 0 ; j < NumVectors; j++) 
    {
      ResetTimer();

      MDS.job = 3;     // Request solve

      // MUMPS overwrites the right-hand side with the solution, so copy B
      // into X and point MDS.rhs at X's j-th column.
      for (int i = 0 ; i < Matrix().NumMyRows() ; ++i) 
        (*vecX)[j][i] = (*vecB)[j][i];
      MDS.rhs = (*vecX)[j];

      dmumps_c(&(MDS)) ;  // Perform solve
      static_cast<void>( CheckError( ) );   // Can hang 
      SolveTime_ = AddTime("Total solve time", SolveTime_);
    }
  } 
  else 
  {
    Epetra_MultiVector SerialVector(SerialMap(),NumVectors);

    ResetTimer();
    AMESOS_CHK_ERR(SerialVector.Import(*vecB,SerialImporter(),Insert));
    VecRedistTime_ = AddTime("Total vector redistribution time", VecRedistTime_);
    
    for (int j = 0 ; j < NumVectors; j++) 
    {
      if (Comm().MyPID() == 0)
	MDS.rhs = SerialVector[j];

      // solve the linear system and take time
      MDS.job = 3;     
      ResetTimer();
      if (Comm().MyPID() < MaxProcs_) 
	dmumps_c(&(MDS)) ;  // Perform solve
      static_cast<void>( CheckError( ) );   // Can hang 

      SolveTime_ = AddTime("Total solve time", SolveTime_);
    }

    // ship solution back and take timing
    ResetTimer();
    AMESOS_CHK_ERR(vecX->Export(SerialVector,SerialImporter(),Insert));
    VecRedistTime_ = AddTime("Total vector redistribution time", VecRedistTime_);
  }

  if (ComputeTrueResidual_)
    ComputeTrueResidual(Matrix(), *vecX, *vecB, UseTranspose(), "Amesos_Mumps");

  if (ComputeVectorNorms_)
    ComputeVectorNorms(*vecX, *vecB, "Amesos_Mumps");

  NumSolve_++;  
  
  return(0) ; 
}
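
// For reference, dmumps_c dispatches on MDS.job: -1 initializes, 1 runs the
// symbolic analysis, 2 the numeric factorization, 3 the solve used above,
// and -2 frees MUMPS's internal storage.  A compressed sketch of the usual
// standalone call sequence (standard DMUMPS_STRUC_C interface; error
// checking and MPI/communicator setup omitted):
//
//   DMUMPS_STRUC_C id;
//   id.job = -1;  id.par = 1;  id.sym = 0;      // initialize the instance
//   dmumps_c(&id);
//   /* fill id.n, id.nz, id.irn, id.jcn, id.a with 1-based triplets */
//   id.job = 4;  dmumps_c(&id);                 // analysis + factorization
//   id.rhs = rhs;  id.job = 3;  dmumps_c(&id);  // solve; rhs is overwritten
//   id.job = -2;  dmumps_c(&id);                // release internal storage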
Example no. 4
int main(int argc, char *argv[]) {

  int ierr = 0;

#ifdef EPETRA_MPI

  // Initialize MPI

  MPI_Init(&argc,&argv);

  Epetra_MpiComm Comm(MPI_COMM_WORLD);

#else

  Epetra_SerialComm Comm;

#endif

  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;

  // Encode the flag as an int and broadcast rank 0's value so that every
  // process agrees on the verbosity setting.
  int verbose_int = verbose ? 1 : 0;
  Comm.Broadcast(&verbose_int, 1, 0);
  verbose = (verbose_int == 1);

  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  if (verbose && MyPID==0)
    cout << Epetra_Version() << endl << endl;

  if (verbose) cout << Comm <<endl;

  int NumVectors = 1;
  int NumMyElements = 4;
  int NumGlobalElements = NumMyElements*NumProc;
  int IndexBase = 0;

  Epetra_Map Map(NumGlobalElements, NumMyElements, IndexBase, Comm);

  EPETRA_TEST_ERR( quad1(Map, verbose), ierr);

  EPETRA_TEST_ERR( quad2(Map, verbose), ierr);

  EPETRA_TEST_ERR( MultiVectorTests(Map, NumVectors, verbose), ierr);

  bool preconstruct_graph = false;

  EPETRA_TEST_ERR( four_quads(Comm, preconstruct_graph, verbose), ierr);

  preconstruct_graph = true;

  EPETRA_TEST_ERR( four_quads(Comm, preconstruct_graph, verbose), ierr);

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return ierr;
}
Example no. 5
//=============================================================================
int Amesos_Klu::PerformNumericFactorization( ) 
{

  // Changed this; it was "if (!TrustMe)...
  // The behavior is not intuitive. Maybe we should introduce a new
  // parameter, FastSolvers or something like that, that does not perform
  // any AddTime, ResetTimer, GetTime.
  // HKT, 1/18/2007:  The "TrustMe_" flag was put back in this code due to performance degradation; Bug# 3042.
  
  if (! TrustMe_ ) ResetTimer(0);

  // This needs to be 1 just in case the pivot ordering is reused.
  int numeric_ok = 1;
  
  if (MyPID_ == 0) {
    
    bool factor_with_pivoting = true ;
    
    // set the default parameters
    PrivateKluData_->common_->scale = ScaleMethod_ ;
    
    const bool NumericNonZero =  PrivateKluData_->Numeric_.get() != 0 ; 
    
    // see if we can "refactorize"
    if ( refactorize_ && NumericNonZero ) { 
      // refactorize using the existing Symbolic and Numeric objects, and
      // using the identical pivot ordering as the prior klu_factor.
      // No partial pivoting is done.
      int result = amesos_klu_refactor (&Ap[0], Ai, Aval,
					&*PrivateKluData_->Symbolic_, 
					&*PrivateKluData_->Numeric_, &*PrivateKluData_->common_) ;
      // Did it work?
      const  bool refactor_ok = result == 1 && PrivateKluData_->common_->status == KLU_OK ;
      if ( refactor_ok ) { 
	
	amesos_klu_rcond (&*PrivateKluData_->Symbolic_,
			  &*PrivateKluData_->Numeric_,
			  &*PrivateKluData_->common_) ;
	
	double rcond = PrivateKluData_->common_->rcond;
 	
	if ( rcond > rcond_threshold_ ) {
	  // factorizing without pivot worked fine.  We are done.
	  factor_with_pivoting = false ;
	}
      }
    }
    
    if ( factor_with_pivoting ) {
      
      // factor with partial pivoting:
      // either this is the first time we are factoring the matrix, or the
      // refactorize parameter is false, or we tried to refactorize and
      // found it to be too inaccurate.
      
      // factor the matrix using partial pivoting
      PrivateKluData_->Numeric_ =
	rcpWithDealloc( amesos_klu_factor(&Ap[0], Ai, Aval,
			       &*PrivateKluData_->Symbolic_, &*PrivateKluData_->common_),
	     deallocFunctorDeleteWithCommon<klu_numeric>(PrivateKluData_->common_,amesos_klu_free_numeric)
	     ,true
	     );
      
      numeric_ok =  PrivateKluData_->Numeric_.get()!=NULL 
	&& PrivateKluData_->common_->status == KLU_OK ;
      
    }
  }
  
  // Communicate the state of the numeric factorization with everyone.
  Comm().Broadcast(&numeric_ok, 1, 0);

  if ( ! numeric_ok ) {
    if (MyPID_ == 0) {
      AMESOS_CHK_ERR( NumericallySingularMatrixError );  
    } else 
      return( NumericallySingularMatrixError );
  }
  
  if ( !TrustMe_ ) {
    NumFactTime_ = AddTime("Total numeric factorization time", NumFactTime_, 0);
  }
  
  return 0;
}
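
// The refactor-then-fall-back pattern above, restated against the plain
// SuiteSparse KLU API that the amesos_klu_* symbols wrap (a sketch; the
// Amesos version adds RCP-managed deallocation and the timing logic):
//
//   // values changed but the sparsity pattern did not: try the cheap path
//   int ok = klu_refactor(Ap, Ai, Ax, Symbolic, Numeric, &Common)
//            && Common.status == KLU_OK;
//   if (ok) {
//     klu_rcond(Symbolic, Numeric, &Common);  // estimate 1/cond(A)
//     ok = Common.rcond > rcond_threshold;    // accept only if well conditioned
//   }
//   if (!ok) {                                // redo the pivoting factorization
//     klu_free_numeric(&Numeric, &Common);
//     Numeric = klu_factor(Ap, Ai, Ax, Symbolic, &Common);
//   }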
Example no. 6
int main(int argc, char *argv[])
{
  int ierr = 0, forierr = 0;
  bool debug = false;

#ifdef EPETRA_MPI

  // Initialize MPI

  MPI_Init(&argc,&argv);
  int rank; // My process ID

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );

#else

  int rank = 0;
  Epetra_SerialComm Comm;

#endif

  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;

  int verbose_int = verbose ? 1 : 0;
  Comm.Broadcast(&verbose_int, 1, 0);
  verbose = (verbose_int == 1);


  //  char tmp;
  //  if (rank==0) cout << "Press any key to continue..."<< std::endl;
  //  if (rank==0) cin >> tmp;
  //  Comm.Barrier();

  Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  if(verbose && MyPID==0)
    cout << Epetra_Version() << std::endl << std::endl;

  if (verbose) cout << "Processor "<<MyPID<<" of "<< NumProc
		    << " is alive."<<endl;

  bool verbose1 = verbose;

  // Redefine verbose to only print on PE 0
  if(verbose && rank!=0) 
		verbose = false;

  int NumMyEquations = 10000;
  int NumGlobalEquations = (NumMyEquations * NumProc) + EPETRA_MIN(NumProc,3);
  if(MyPID < 3) 
    NumMyEquations++;

  // Construct a Map that puts approximately the same Number of equations on each processor

  Epetra_Map Map(NumGlobalEquations, NumMyEquations, 0, Comm);
  
  // Get update list and number of local equations from newly created Map
  int* MyGlobalElements = new int[Map.NumMyElements()];
  Map.MyGlobalElements(MyGlobalElements);

  // Create an integer vector NumNz that is used to build the Petra Matrix.
  // NumNz[i] is the Number of OFF-DIAGONAL term for the ith global equation on this processor

  int* NumNz = new int[NumMyEquations];

  // We are building a tridiagonal matrix where each row has (-1 2 -1)
  // So we need 2 off-diagonal terms (except for the first and last equation)

  for (int i = 0; i < NumMyEquations; i++)
    if((MyGlobalElements[i] == 0) || (MyGlobalElements[i] == NumGlobalEquations - 1))
      NumNz[i] = 1;
    else
      NumNz[i] = 2;

  // Create a Epetra_Matrix

  Epetra_CrsMatrix A(Copy, Map, NumNz);
  EPETRA_TEST_ERR(A.IndicesAreGlobal(),ierr);
  EPETRA_TEST_ERR(A.IndicesAreLocal(),ierr);
  
  // Add  rows one-at-a-time
  // Need some vectors to help
  // Off diagonal Values will always be -1


  double* Values = new double[2];
  Values[0] = -1.0;
  Values[1] = -1.0;
  int* Indices = new int[2];
  double two = 2.0;
  int NumEntries;

  forierr = 0;
  for (int i = 0; i < NumMyEquations; i++) {
    if(MyGlobalElements[i] == 0) {
			Indices[0] = 1;
			NumEntries = 1;
		}
    else if (MyGlobalElements[i] == NumGlobalEquations-1) {
			Indices[0] = NumGlobalEquations-2;
			NumEntries = 1;
		}
    else {
			Indices[0] = MyGlobalElements[i]-1;
			Indices[1] = MyGlobalElements[i]+1;
			NumEntries = 2;
		}
		forierr += !(A.InsertGlobalValues(MyGlobalElements[i], NumEntries, Values, Indices)==0);
		forierr += !(A.InsertGlobalValues(MyGlobalElements[i], 1, &two, MyGlobalElements+i)>0); // Put in the diagonal entry
  }
  EPETRA_TEST_ERR(forierr,ierr);

  int * indexOffsetTmp;
  int * indicesTmp;
  double * valuesTmp;
  // Finish up
  EPETRA_TEST_ERR(!(A.IndicesAreGlobal()),ierr);
  EPETRA_TEST_ERR(!(A.ExtractCrsDataPointers(indexOffsetTmp, indicesTmp, valuesTmp)==-1),ierr);  // Should fail
  EPETRA_TEST_ERR(!(A.FillComplete(false)==0),ierr);
  EPETRA_TEST_ERR(!(A.ExtractCrsDataPointers(indexOffsetTmp, indicesTmp, valuesTmp)==-1),ierr);  // Should fail
  EPETRA_TEST_ERR(!(A.IndicesAreLocal()),ierr);
  EPETRA_TEST_ERR(A.StorageOptimized(),ierr);
  A.OptimizeStorage();
  EPETRA_TEST_ERR(!(A.StorageOptimized()),ierr);
  EPETRA_TEST_ERR(!(A.ExtractCrsDataPointers(indexOffsetTmp, indicesTmp, valuesTmp)==0),ierr);  // Should succeed
  const Epetra_CrsGraph & GofA = A.Graph();
  EPETRA_TEST_ERR((indicesTmp!=GofA[0] || valuesTmp!=A[0]),ierr); // Extra check to see if operator[] is consistent
  EPETRA_TEST_ERR(A.UpperTriangular(),ierr);
  EPETRA_TEST_ERR(A.LowerTriangular(),ierr);
	
  int NumMyNonzeros = 3 * NumMyEquations;
  if(A.LRID(0) >= 0) 
		NumMyNonzeros--; // If I own first global row, then there is one less nonzero
  if(A.LRID(NumGlobalEquations-1) >= 0) 
		NumMyNonzeros--; // If I own last global row, then there is one less nonzero
  EPETRA_TEST_ERR(check(A, NumMyEquations, NumGlobalEquations, NumMyNonzeros, 3*NumGlobalEquations-2, 
	       MyGlobalElements, verbose),ierr);
  forierr = 0;
  for (int i = 0; i < NumMyEquations; i++) 
		forierr += !(A.NumGlobalEntries(MyGlobalElements[i])==NumNz[i]+1);
  EPETRA_TEST_ERR(forierr,ierr);
  forierr = 0;
  for (int i = 0; i < NumMyEquations; i++) 
		forierr += !(A.NumMyEntries(i)==NumNz[i]+1);
  EPETRA_TEST_ERR(forierr,ierr);

  if (verbose) cout << "\n\nNumEntries function check OK" << std::endl<< std::endl;

  EPETRA_TEST_ERR(check_graph_sharing(Comm),ierr);

  // Create vectors for Power method

  Epetra_Vector q(Map);
  Epetra_Vector z(Map);
  Epetra_Vector resid(Map);

  // variable needed for iteration
  double lambda = 0.0;
  // int niters = 10000;
  int niters = 200;
  double tolerance = 1.0e-1;

  /////////////////////////////////////////////////////////////////////////////////////////////////
	
  // Iterate

  Epetra_Flops flopcounter;
  A.SetFlopCounter(flopcounter);
  q.SetFlopCounter(A);
  z.SetFlopCounter(A);
  resid.SetFlopCounter(A);
	

  Epetra_Time timer(Comm);
  EPETRA_TEST_ERR(power_method(false, A, q, z, resid, &lambda, niters, tolerance, verbose),ierr);
  double elapsed_time = timer.ElapsedTime();
  double total_flops = A.Flops() + q.Flops() + z.Flops() + resid.Flops();
  double MFLOPs = total_flops/elapsed_time/1000000.0;

  if (verbose) cout << "\n\nTotal MFLOPs for first solve = " << MFLOPs << std::endl<< std::endl;

  /////////////////////////////////////////////////////////////////////////////////////////////////
	
  // Solve transpose problem

  if (verbose) cout << "\n\nUsing transpose of matrix and solving again (should give same result).\n\n"
		    << std::endl;
  // Iterate
  lambda = 0.0;
  flopcounter.ResetFlops();
  timer.ResetStartTime();
  EPETRA_TEST_ERR(power_method(true, A, q, z, resid, &lambda, niters, tolerance, verbose),ierr);
  elapsed_time = timer.ElapsedTime();
  total_flops = A.Flops() + q.Flops() + z.Flops() + resid.Flops();
  MFLOPs = total_flops/elapsed_time/1000000.0;

  if (verbose) cout << "\n\nTotal MFLOPs for transpose solve = " << MFLOPs << std::endl<< endl;

  /////////////////////////////////////////////////////////////////////////////////////////////////

  // Increase diagonal dominance

  if (verbose) cout << "\n\nIncreasing the magnitude of first diagonal term and solving again\n\n"
		    << endl;

  
  if (A.MyGlobalRow(0)) {
    int numvals = A.NumGlobalEntries(0);
    double * Rowvals = new double [numvals];
    int    * Rowinds = new int    [numvals];
    A.ExtractGlobalRowCopy(0, numvals, numvals, Rowvals, Rowinds); // Extract global row 0

    for (int i=0; i<numvals; i++) if (Rowinds[i] == 0) Rowvals[i] *= 10.0;
    
    A.ReplaceGlobalValues(0, numvals, Rowvals, Rowinds);
    delete [] Rowvals;
    delete [] Rowinds;
  }
  // Iterate (again)
  lambda = 0.0;
  flopcounter.ResetFlops();
  timer.ResetStartTime();
  EPETRA_TEST_ERR(power_method(false, A, q, z, resid, &lambda, niters, tolerance, verbose),ierr);
  elapsed_time = timer.ElapsedTime();
  total_flops = A.Flops() + q.Flops() + z.Flops() + resid.Flops();
  MFLOPs = total_flops/elapsed_time/1000000.0;

  if (verbose) cout << "\n\nTotal MFLOPs for second solve = " << MFLOPs << endl<< endl;

  /////////////////////////////////////////////////////////////////////////////////////////////////

  // Solve transpose problem

  if (verbose) cout << "\n\nUsing transpose of matrix and solving again (should give same result).\n\n"
		    << endl;

  // Iterate (again)
  lambda = 0.0;
  flopcounter.ResetFlops();
  timer.ResetStartTime();
  EPETRA_TEST_ERR(power_method(true, A, q, z, resid, &lambda, niters, tolerance, verbose),ierr);
  elapsed_time = timer.ElapsedTime();
  total_flops = A.Flops() + q.Flops() + z.Flops() + resid.Flops();
  MFLOPs = total_flops/elapsed_time/1000000.0;


  if (verbose) cout << "\n\nTotal MFLOPs for transpose of second solve = " << MFLOPs << endl<< endl;

  if (verbose) cout << "\n\n*****Testing constant entry constructor" << endl<< endl;

  Epetra_CrsMatrix AA(Copy, Map, 5);
  
  if (debug) Comm.Barrier();

  double dble_one = 1.0;
  for (int i=0; i< NumMyEquations; i++) AA.InsertGlobalValues(MyGlobalElements[i], 1, &dble_one, MyGlobalElements+i);

  // Note:  All processors will call the following Insert routines, but only the processor
  //        that owns it will actually do anything

  int One = 1;
  if (AA.MyGlobalRow(0)) {
    EPETRA_TEST_ERR(!(AA.InsertGlobalValues(0, 0, &dble_one, &One)==0),ierr);
  }
  else EPETRA_TEST_ERR(!(AA.InsertGlobalValues(0, 1, &dble_one, &One)==-1),ierr);
  EPETRA_TEST_ERR(!(AA.FillComplete(false)==0),ierr);
  EPETRA_TEST_ERR(AA.StorageOptimized(),ierr);
  EPETRA_TEST_ERR(!(AA.UpperTriangular()),ierr);
  EPETRA_TEST_ERR(!(AA.LowerTriangular()),ierr);
  
  if (debug) Comm.Barrier();
  EPETRA_TEST_ERR(check(AA, NumMyEquations, NumGlobalEquations, NumMyEquations, NumGlobalEquations, 
	       MyGlobalElements, verbose),ierr);

  if (debug) Comm.Barrier();

  forierr = 0;
  for (int i=0; i<NumMyEquations; i++) forierr += !(AA.NumGlobalEntries(MyGlobalElements[i])==1);
  EPETRA_TEST_ERR(forierr,ierr);

  if (verbose) cout << "\n\nNumEntries function check OK" << endl<< endl;

  if (debug) Comm.Barrier();

  if (verbose) cout << "\n\n*****Testing copy constructor" << endl<< endl;

  Epetra_CrsMatrix B(AA);
  EPETRA_TEST_ERR(check(B, NumMyEquations, NumGlobalEquations, NumMyEquations, NumGlobalEquations, 
	       MyGlobalElements, verbose),ierr);

  forierr = 0;
  for (int i=0; i<NumMyEquations; i++) forierr += !(B.NumGlobalEntries(MyGlobalElements[i])==1);
  EPETRA_TEST_ERR(forierr,ierr);

  if (verbose) cout << "\n\nNumEntries function check OK" << endl<< endl;

  if (debug) Comm.Barrier();

  if (verbose) cout << "\n\n*****Testing local view constructor" << endl<< endl;

  Epetra_CrsMatrix BV(View, AA.RowMap(), AA.ColMap(), 0);

  forierr = 0;
  int* Inds;
  double* Vals;
  for (int i = 0; i < NumMyEquations; i++) {
    forierr += !(AA.ExtractMyRowView(i, NumEntries, Vals, Inds)==0);
    forierr += !(BV.InsertMyValues(i, NumEntries, Vals, Inds)==0);
  }
  BV.FillComplete(false);
  EPETRA_TEST_ERR(check(BV, NumMyEquations, NumGlobalEquations, NumMyEquations, NumGlobalEquations, 
												MyGlobalElements, verbose),ierr);

  forierr = 0;
  for (int i=0; i<NumMyEquations; i++) forierr += !(BV.NumGlobalEntries(MyGlobalElements[i])==1);
  EPETRA_TEST_ERR(forierr,ierr);

  if (verbose) cout << "\n\nNumEntries function check OK" << endl<< endl;

  if (debug) Comm.Barrier();
  if (verbose) cout << "\n\n*****Testing post construction modifications" << endl<< endl;

  EPETRA_TEST_ERR(!(B.InsertGlobalValues(0, 1, &dble_one, &One)==-2),ierr);


  // Release all objects
  delete [] NumNz;
  delete [] Values;
  delete [] Indices;
  delete [] MyGlobalElements;
			

  if (verbose1) {
    // Test ostream << operator (if verbose1)
    // Construct a Map that puts 2 equations on each PE
    
    int NumMyElements1 = 2;
    int NumMyEquations1 = NumMyElements1;
    int NumGlobalEquations1 = NumMyEquations1*NumProc;

    Epetra_Map Map1(-1, NumMyElements1, 0, Comm);
    
    // Get update list and number of local equations from newly created Map
    int * MyGlobalElements1 = new int[Map1.NumMyElements()];
    Map1.MyGlobalElements(MyGlobalElements1);
    
    // Create an integer vector NumNz that is used to build the Petra Matrix.
    // NumNz[i] is the Number of OFF-DIAGONAL term for the ith global equation on this processor
    
    int * NumNz1 = new int[NumMyEquations1];
    
    // We are building a tridiagonal matrix where each row has (-1 2 -1)
    // So we need 2 off-diagonal terms (except for the first and last equation)
    
    for (int i=0; i<NumMyEquations1; i++)
      if (MyGlobalElements1[i]==0 || MyGlobalElements1[i] == NumGlobalEquations1-1)
	NumNz1[i] = 1;
      else
	NumNz1[i] = 2;
    
    // Create a Epetra_Matrix
    
    Epetra_CrsMatrix A1(Copy, Map1, NumNz1);
    
    // Add  rows one-at-a-time
    // Need some vectors to help
    // Off diagonal Values will always be -1
    
    
    double *Values1 = new double[2];
    Values1[0] = -1.0; Values1[1] = -1.0;
    int *Indices1 = new int[2];
    double two1 = 2.0;
    int NumEntries1;

    forierr = 0;
    for (int i=0; i<NumMyEquations1; i++)
      {
	if (MyGlobalElements1[i]==0)
	  {
	    Indices1[0] = 1;
	    NumEntries1 = 1;
	  }
	else if (MyGlobalElements1[i] == NumGlobalEquations1-1)
	  {
	    Indices1[0] = NumGlobalEquations1-2;
	    NumEntries1 = 1;
	  }
	else
	  {
	    Indices1[0] = MyGlobalElements1[i]-1;
	    Indices1[1] = MyGlobalElements1[i]+1;
	    NumEntries1 = 2;
	  }
	forierr += !(A1.InsertGlobalValues(MyGlobalElements1[i], NumEntries1, Values1, Indices1)==0);
	forierr += !(A1.InsertGlobalValues(MyGlobalElements1[i], 1, &two1, MyGlobalElements1+i)>0); // Put in the diagonal entry
      }
    EPETRA_TEST_ERR(forierr,ierr);
    delete [] Indices1;
    delete [] Values1;
    
    // Finish up
    EPETRA_TEST_ERR(!(A1.FillComplete(false)==0),ierr);
    
    // Test diagonal extraction function

    Epetra_Vector checkDiag(Map1);
    EPETRA_TEST_ERR(!(A1.ExtractDiagonalCopy(checkDiag)==0),ierr);

    forierr = 0;
    for (int i=0; i<NumMyEquations1; i++) forierr += !(checkDiag[i]==two1);
    EPETRA_TEST_ERR(forierr,ierr);

    // Test diagonal replacement method

    forierr = 0;
    for (int i=0; i<NumMyEquations1; i++) checkDiag[i]=two1*two1;
    EPETRA_TEST_ERR(forierr,ierr);

    EPETRA_TEST_ERR(!(A1.ReplaceDiagonalValues(checkDiag)==0),ierr);

    Epetra_Vector checkDiag1(Map1);
    EPETRA_TEST_ERR(!(A1.ExtractDiagonalCopy(checkDiag1)==0),ierr);

    forierr = 0;
    for (int i=0; i<NumMyEquations1; i++) forierr += !(checkDiag[i]==checkDiag1[i]);
    EPETRA_TEST_ERR(forierr,ierr);

    if (verbose) cout << "\n\nDiagonal extraction and replacement OK.\n\n" << endl;

    double orignorm = A1.NormOne();
    EPETRA_TEST_ERR(!(A1.Scale(4.0)==0),ierr);
    EPETRA_TEST_ERR(!(A1.NormOne()!=orignorm),ierr);
    
    if (verbose) cout << "\n\nMatrix scale OK.\n\n" << endl;

    if (verbose) cout << "\n\nPrint out tridiagonal matrix, each part on each processor.\n\n" << endl;
    cout << A1 << endl;
   

  // Release all objects
  delete [] NumNz1;
  delete [] MyGlobalElements1;

  }

  if (verbose) cout << "\n\n*****Testing LeftScale and RightScale" << endl << endl;

  int NumMyElements2 = 7;
  int NumMyRows2 = 1; // This value should not be changed without editing the code below.
  Epetra_Map RowMap(-1,NumMyRows2,0,Comm);
  Epetra_Map ColMap(NumMyElements2,NumMyElements2,0,Comm);
  // The DomainMap needs to be different from the ColMap for the test to 
  // be meaningful.
  Epetra_Map DomainMap(NumMyElements2,0,Comm);
  int NumMyRangeElements2 = 0;
  // We need to distribute the elements differently for the range map also.
  if (MyPID % 2 == 0)
    NumMyRangeElements2 = NumMyRows2*2; //put elements on even number procs 
  if (NumProc % 2 == 1 && MyPID == NumProc-1)
    NumMyRangeElements2 = NumMyRows2; // If the number of procs is odd, put NumMyRows2 elements on the last proc
  Epetra_Map RangeMap(-1,NumMyRangeElements2,0,Comm);
  Epetra_CrsMatrix A2(Copy,RowMap,ColMap,NumMyElements2);
  double * Values2 = new double[NumMyElements2];
  int * Indices2 = new int[NumMyElements2]; 

  for (int i=0; i<NumMyElements2; i++) {
    Values2[i] = i+MyPID;
    Indices2[i]=i;
  }

  A2.InsertMyValues(0,NumMyElements2,Values2,Indices2);
  A2.FillComplete(DomainMap,RangeMap,false);
  Epetra_CrsMatrix A2copy(A2);

  double * RowLeftScaleValues = new double[NumMyRows2];
  double * ColRightScaleValues = new double[NumMyElements2];
  int RowLoopLength = RowMap.MaxMyGID()-RowMap.MinMyGID()+1;
  for (int i=0; i<RowLoopLength; i++)
    RowLeftScaleValues[i] = (i + RowMap.MinMyGID() ) % 2 + 1;
  // For the column map, all procs own all elements
  for (int  i=0; i<NumMyElements2;i++)
    ColRightScaleValues[i] = i % 2 + 1;

  int RangeLoopLength = RangeMap.MaxMyGID()-RangeMap.MinMyGID()+1;
  double * RangeLeftScaleValues = new double[RangeLoopLength];
  int DomainLoopLength = DomainMap.MaxMyGID()-DomainMap.MinMyGID()+1;
   double * DomainRightScaleValues = new double[DomainLoopLength];
  for (int i=0; i<RangeLoopLength; i++)
    RangeLeftScaleValues[i] = 1.0/((i + RangeMap.MinMyGID() ) % 2 + 1);
  for (int  i=0; i<DomainLoopLength;i++)
    DomainRightScaleValues[i] = 1.0/((i + DomainMap.MinMyGID() ) % 2 + 1);
                                                                                
  Epetra_Vector xRow(View,RowMap,RowLeftScaleValues);
  Epetra_Vector xCol(View,ColMap,ColRightScaleValues);
  Epetra_Vector xRange(View,RangeMap,RangeLeftScaleValues);
  Epetra_Vector xDomain(View,DomainMap,DomainRightScaleValues);

  double A2infNorm = A2.NormInf();
  double A2oneNorm = A2.NormOne();

  if (verbose1) cout << A2;
  EPETRA_TEST_ERR(A2.LeftScale(xRow),ierr);
  double A2infNorm1 = A2.NormInf();
  double A2oneNorm1 = A2.NormOne();
  bool ScalingBroke = false;
  if (A2infNorm1>2*A2infNorm||A2infNorm1<A2infNorm) {
    EPETRA_TEST_ERR(-31,ierr);
    ScalingBroke = true;
  }
  if (A2oneNorm1>2*A2oneNorm||A2oneNorm1<A2oneNorm) {

    EPETRA_TEST_ERR(-32,ierr);
    ScalingBroke = true;
  }
  if (verbose1) cout << A2;
  EPETRA_TEST_ERR(A2.RightScale(xCol),ierr);
  double A2infNorm2 = A2.NormInf();
  double A2oneNorm2 = A2.NormOne();
  if (A2infNorm2>=2*A2infNorm1||A2infNorm2<=A2infNorm1) {
    EPETRA_TEST_ERR(-33,ierr);
    ScalingBroke = true;
  }
  if (A2oneNorm2>2*A2oneNorm1||A2oneNorm2<=A2oneNorm1) {
    EPETRA_TEST_ERR(-34,ierr);
    ScalingBroke = true;
  }
  if (verbose1) cout << A2;
  EPETRA_TEST_ERR(A2.RightScale(xDomain),ierr);
  double A2infNorm3 = A2.NormInf();
  double A2oneNorm3 = A2.NormOne();
  // The last two scaling ops cancel each other out
  if (A2infNorm3!=A2infNorm1) {
    EPETRA_TEST_ERR(-35,ierr)
    ScalingBroke = true;
  }
  if (A2oneNorm3!=A2oneNorm1) {
    EPETRA_TEST_ERR(-36,ierr)
    ScalingBroke = true;
  }
  if (verbose1) cout << A2;
  EPETRA_TEST_ERR(A2.LeftScale(xRange),ierr);
  double A2infNorm4 = A2.NormInf();
  double A2oneNorm4 = A2.NormOne();
  // The 4 scaling ops all cancel out
  if (A2infNorm4!=A2infNorm) {
    EPETRA_TEST_ERR(-37,ierr)
    ScalingBroke = true;
  }
  if (A2oneNorm4!=A2oneNorm) {
    EPETRA_TEST_ERR(-38,ierr)
    ScalingBroke = true;
  }

  //
  //  Now try changing the values underneath and make sure that 
  //  telling one process about the change causes NormInf() and 
  //  NormOne() to recompute the norm on all processes.
  //
  
  double *values; 
  int num_my_rows = A2.NumMyRows() ; 
  int num_entries;

  for ( int  i=0 ; i< num_my_rows; i++ ) {
    EPETRA_TEST_ERR( A2.ExtractMyRowView( i, num_entries, values ), ierr );
    for ( int j = 0 ; j <num_entries; j++ ) {
      values[j] *= 2.0; 
    }
  }


  if ( MyPID == 0 )
    A2.SumIntoGlobalValues( 0, 0, 0, 0 ) ; 

  double A2infNorm5 = A2.NormInf();
  double A2oneNorm5 = A2.NormOne();

  if (A2infNorm5!=2.0 * A2infNorm4) {
    EPETRA_TEST_ERR(-39,ierr)
    ScalingBroke = true;
  }
  if (A2oneNorm5!= 2.0 * A2oneNorm4) {
    EPETRA_TEST_ERR(-40,ierr)
    ScalingBroke = true;
  }

  //
  //  Restore the values underneath
  //
  for ( int  i=0 ; i< num_my_rows; i++ ) {
    EPETRA_TEST_ERR( A2.ExtractMyRowView( i, num_entries, values ), ierr );
    for ( int j = 0 ; j <num_entries; j++ ) {
      values[j] /= 2.0; 
    }
  }

  if (verbose1) cout << A2;

  if (ScalingBroke) {
    if (verbose) cout << endl << "LeftScale and RightScale tests FAILED" << endl << endl;
  }
  else {
    if (verbose) cout << endl << "LeftScale and RightScale tests PASSED" << endl << endl;
  }

  Comm.Barrier();

  if (verbose) cout << "\n\n*****Testing InvRowMaxs and InvColMaxs" << endl << endl;

  if (verbose1) cout << A2 << endl;
  EPETRA_TEST_ERR(A2.InvRowMaxs(xRow),ierr);
  EPETRA_TEST_ERR(A2.InvRowMaxs(xRange),ierr);
  if (verbose1) cout << xRow << endl << xRange << endl;

  if (verbose) cout << "\n\n*****Testing InvRowSums and InvColSums" << endl << endl;
  bool InvSumsBroke = false;
// Works!
  EPETRA_TEST_ERR(A2.InvRowSums(xRow),ierr);
  if (verbose1) cout << xRow;
  EPETRA_TEST_ERR(A2.LeftScale(xRow),ierr);
  float A2infNormFloat = A2.NormInf();
  if (verbose1) cout << A2 << endl;
  if (fabs(1.0-A2infNormFloat) > 1.e-5) {
    EPETRA_TEST_ERR(-41,ierr);
    InvSumsBroke = true;
  }

  // Works
  int expectedcode = 1;
  if (Comm.NumProc()>1) expectedcode = 0;
  EPETRA_TEST_ERR(!(A2.InvColSums(xDomain)==expectedcode),ierr); // This matrix has a single row, the first column has a zero, so a warning is issued.
  if (verbose1) cout << xDomain << endl;
  EPETRA_TEST_ERR(A2.RightScale(xDomain),ierr);
  float A2oneNormFloat2 = A2.NormOne();
  if (verbose1) cout << A2;
  if (fabs(1.0-A2oneNormFloat2)>1.e-5) {
    EPETRA_TEST_ERR(-42,ierr)
    InvSumsBroke = true;
  }

// Works!
  EPETRA_TEST_ERR(A2.InvRowSums(xRange),ierr);

  if (verbose1) cout << xRange;
  EPETRA_TEST_ERR(A2.LeftScale(xRange),ierr);
  float A2infNormFloat2 = A2.NormInf(); // We use a float so that rounding error
	// will not prevent the sum from being 1.0.
  if (verbose1) cout << A2;
  if (fabs(1.0-A2infNormFloat2)>1.e-5) {
    cout << "InfNorm should be = 1, but InfNorm = " << A2infNormFloat2 << endl;
    EPETRA_TEST_ERR(-43,ierr);
    InvSumsBroke = true;
  }

  // Doesn't work - may not need this test because column ownership is not unique
  /*  EPETRA_TEST_ERR(A2.InvColSums(xCol),ierr);
cout << xCol;
  EPETRA_TEST_ERR(A2.RightScale(xCol),ierr);
  float A2oneNormFloat = A2.NormOne();
cout << A2;
  if (fabs(1.0-A2oneNormFloat)>1.e-5) {
    EPETRA_TEST_ERR(-44,ierr);
    InvSumsBroke = true;
  }
  */
  delete [] ColRightScaleValues;
  delete [] DomainRightScaleValues;
  if (verbose) cout << "Begin partial sum testing." << endl;
  // Test with a matrix that has partial sums for a subset of the rows 
  // on multiple processors. (Except for the serial case, of course.)
  int NumMyRows3 = 2; // Changing this requires further changes below
  int * myGlobalElements = new int[NumMyRows3];
  for (int i=0; i<NumMyRows3; i++) myGlobalElements[i] = MyPID+i;
  Epetra_Map RowMap3(NumProc*2, NumMyRows3, myGlobalElements, 0, Comm);
  int NumMyElements3 = 5;
  Epetra_CrsMatrix A3(Copy, RowMap3, NumMyElements3);
  double * Values3 = new double[NumMyElements3];
  int * Indices3 = new int[NumMyElements3];
  for (int i=0; i < NumMyElements3; i++) {
    Values3[i] = (int) (MyPID + (i+1));
    Indices3[i]=i;
  }
  for (int i=0; i<NumMyRows3; i++) {
    A3.InsertGlobalValues(myGlobalElements[i],NumMyElements3,Values3,Indices3);
  }
  Epetra_Map RangeMap3(NumProc+1, 0, Comm);
  Epetra_Map DomainMap3(NumMyElements3, 0, Comm);
  EPETRA_TEST_ERR(A3.FillComplete(DomainMap3, RangeMap3,false),ierr);
  if (verbose1) cout << A3;
  Epetra_Vector xRange3(RangeMap3,false);
  Epetra_Vector xDomain3(DomainMap3,false);

  EPETRA_TEST_ERR(A3.InvRowSums(xRange3),ierr);

  if (verbose1) cout << xRange3;
  EPETRA_TEST_ERR(A3.LeftScale(xRange3),ierr);
  float A3infNormFloat = A3.NormInf();
  if (verbose1) cout << A3;
  if (1.0!=A3infNormFloat) {
    cout << "InfNorm should be = 1, but InfNorm = " << A3infNormFloat <<endl;
    EPETRA_TEST_ERR(-61,ierr);
    InvSumsBroke = true;
  }
  // we want to take the transpose of our matrix and fill in different values.
  int NumMyColumns3 = NumMyRows3;
  Epetra_Map ColMap3cm(RowMap3); 
  Epetra_Map RowMap3cm(A3.ColMap());

  Epetra_CrsMatrix A3cm(Copy,RowMap3cm,ColMap3cm,NumProc+1);
  double *Values3cm = new double[NumMyColumns3];
  int * Indices3cm = new int[NumMyColumns3];
  for (int i=0; i<NumMyColumns3; i++) {
    Values3cm[i] = MyPID + i + 1;
    Indices3cm[i]= i + MyPID;
  }
  for (int ii=0; ii<NumMyElements3; ii++) {
    A3cm.InsertGlobalValues(ii, NumMyColumns3, Values3cm, Indices3cm);
  }

  // The DomainMap and the RangeMap from the last test will work fine for 
  // the RangeMap and DomainMap, respectively, but I will make copies to
  // avoid confusion when passing what looks like a DomainMap where we
  // need a RangeMap and vice versa.
  Epetra_Map RangeMap3cm(DomainMap3);
  Epetra_Map DomainMap3cm(RangeMap3);
  EPETRA_TEST_ERR(A3cm.FillComplete(DomainMap3cm,RangeMap3cm),ierr);
  if (verbose1) cout << A3cm << endl;

  // Again, we can copy objects from the last example.
  //Epetra_Vector xRange3cm(xDomain3); //Don't use at this time
  Epetra_Vector xDomain3cm(DomainMap3cm,false);

  EPETRA_TEST_ERR(A3cm.InvColSums(xDomain3cm),ierr);

  if (verbose1) cout << xDomain3cm << endl;

  EPETRA_TEST_ERR(A3cm.RightScale(xDomain3cm),ierr);
  float A3cmOneNormFloat  = A3cm.NormOne();
  if (verbose1) cout << A3cm << endl;
  if (1.0!=A3cmOneNormFloat) {
    cout << "OneNorm should be = 1, but OneNorm = " << A3cmOneNormFloat << endl;
    EPETRA_TEST_ERR(-62,ierr);
    InvSumsBroke = true;
  }
  
  if (verbose) cout << "End partial sum testing" << endl;
  if (verbose) cout << "Begin replicated testing" << endl;

  // We will now view the shared row as a replicated row, rather than one 
  // that has partial sums of its entries on multiple processors.
  // We will reuse much of the data used for the partial sum testing.
  Epetra_Vector xRow3(RowMap3,false); 
  Epetra_CrsMatrix A4(Copy, RowMap3, NumMyElements3);
  for (int ii=0; ii < NumMyElements3; ii++) {
    Values3[ii] = (int)((ii*.6)+1.0);
  }
  for (int ii=0; ii<NumMyRows3; ii++) {
    A4.InsertGlobalValues(myGlobalElements[ii],NumMyElements3,Values3,Indices3);
  }
  EPETRA_TEST_ERR(A4.FillComplete(DomainMap3, RangeMap3,false),ierr);
  if (verbose1) cout << A4 << endl;
  // The next two lines should be expanded into a verifiable test.
  EPETRA_TEST_ERR(A4.InvRowMaxs(xRow3),ierr);
  EPETRA_TEST_ERR(A4.InvRowMaxs(xRange3),ierr);
  if (verbose1) cout << xRow3 << xRange3;

  EPETRA_TEST_ERR(A4.InvRowSums(xRow3),ierr);                      
  if (verbose1) cout << xRow3;
  EPETRA_TEST_ERR(A4.LeftScale(xRow3),ierr);
  float A4infNormFloat = A4.NormInf();
  if (verbose1) cout << A4;
  if (2.0!=A4infNormFloat && NumProc != 1) {
    if (verbose1) cout << "InfNorm should be = 2 (because one row is replicated on two processors and NormInf() does not handle replication), but InfNorm = " << A4infNormFloat <<endl;
    EPETRA_TEST_ERR(-63,ierr);
    InvSumsBroke = true;
  }
  else if (1.0!=A4infNormFloat && NumProc == 1) {
    if (verbose1) cout << "InfNorm should be = 1, but InfNorm = " << A4infNormFloat <<endl;
    EPETRA_TEST_ERR(-63,ierr);
    InvSumsBroke = true;
  }
  
  Epetra_Vector xCol3cm(ColMap3cm,false);
  Epetra_CrsMatrix A4cm(Copy, RowMap3cm, ColMap3cm, NumProc+1);
  //Use values from A3cm
  for (int ii=0; ii<NumMyElements3; ii++) {
    A4cm.InsertGlobalValues(ii,NumMyColumns3,Values3cm,Indices3cm);
  }
  EPETRA_TEST_ERR(A4cm.FillComplete(DomainMap3cm, RangeMap3cm,false),ierr);
  if (verbose1) cout << A4cm << endl;
  // The next two lines should be expanded into a verifiable test.
  EPETRA_TEST_ERR(A4cm.InvColMaxs(xCol3cm),ierr);
  EPETRA_TEST_ERR(A4cm.InvColMaxs(xDomain3cm),ierr);
  if (verbose1) cout << xCol3cm << xDomain3cm;

  EPETRA_TEST_ERR(A4cm.InvColSums(xCol3cm),ierr);
 
  if (verbose1) cout << xCol3cm << endl;
  EPETRA_TEST_ERR(A4cm.RightScale(xCol3cm),ierr);
  float A4cmOneNormFloat = A4cm.NormOne();
  if (verbose1) cout << A4cm << endl;
  if (2.0!=A4cmOneNormFloat && NumProc != 1) {
    if (verbose1) cout << "OneNorm should be = 2 (because one column is replicated on two processors and NormOne() does not handle replication), but OneNorm = " << A4cmOneNormFloat << endl;
    EPETRA_TEST_ERR(-64,ierr);
    InvSumsBroke = true;
  }
  else if (1.0!=A4cmOneNormFloat && NumProc == 1) {
    if (verbose1) cout << "OneNorm should be = 1, but OneNorm = " << A4cmOneNormFloat <<endl;
    EPETRA_TEST_ERR(-64,ierr);
    InvSumsBroke = true;
  }

  if (verbose) cout << "End replicated testing" << endl;

  if (InvSumsBroke) {
    if (verbose) cout << endl << "InvRowSums tests FAILED" << endl << endl;
  }
  else
    if (verbose) cout << endl << "InvRowSums tests PASSED" << endl << endl;

  A3cm.PutScalar(2.0);
  int nnz_A3cm = A3cm.Graph().NumGlobalNonzeros();
  double check_frobnorm = sqrt(nnz_A3cm*4.0);
  double frobnorm = A3cm.NormFrobenius();

  bool frobnorm_test_failed = false;
  if (fabs(check_frobnorm-frobnorm) > 5.e-5) {
    frobnorm_test_failed = true;
  }

  if (frobnorm_test_failed) {
    if (verbose) std::cout << "Frobenius-norm test FAILED."<<std::endl;
    EPETRA_TEST_ERR(-65, ierr);
  }

  delete [] Values2;
  delete [] Indices2;
  delete [] myGlobalElements;
  delete [] Values3;
  delete [] Indices3;
  delete [] Values3cm;
  delete [] Indices3cm;
  delete [] RangeLeftScaleValues;
  delete [] RowLeftScaleValues;
#ifdef EPETRA_MPI
  MPI_Finalize() ;
#endif

  /* end main */
  return ierr;
}
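
// power_method() is defined elsewhere in this test file.  A minimal sketch
// of a compatible power iteration built from documented Epetra calls (the
// signature is assumed from the call sites above; the real routine may
// differ in details such as flop accounting and convergence reporting):
int power_method_sketch(bool TransA, Epetra_CrsMatrix& A, Epetra_Vector& q,
                        Epetra_Vector& z, Epetra_Vector& resid,
                        double* lambda, int niters, double tolerance,
                        bool verbose)
{
  for (int iter = 0; iter < niters; iter++) {
    double normz;
    z.Norm2(&normz);
    q.Update(1.0/normz, z, 0.0);              // q = z / ||z||
    A.Multiply(TransA, q, z);                 // z = A q (or A^T q)
    q.Dot(z, lambda);                         // Rayleigh-quotient estimate
    resid.Update(1.0, z, -(*lambda), q, 0.0); // resid = z - lambda*q
    double residual;
    resid.Norm2(&residual);
    if (verbose && iter % 100 == 0)
      cout << "Iter = " << iter << "  Lambda = " << *lambda
           << "  Residual = " << residual << endl;
    if (residual < tolerance) return 0;       // converged
  }
  return 1;                                   // no convergence within niters
}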
Example no. 7
//=============================================================================
Epetra_Map * Epetra_Map::RemoveEmptyProcesses() const
{
#ifdef HAVE_MPI
  const Epetra_MpiComm * MpiComm = dynamic_cast<const Epetra_MpiComm*>(&Comm());

  // If the Comm isn't MPI, just treat this as a copy constructor
  if(!MpiComm) return new Epetra_Map(*this);

  MPI_Comm NewComm,MyMPIComm = MpiComm->Comm();

  // Create the new communicator.  On processes where color == MPI_UNDEFINED,
  // MPI_Comm_split returns MPI_COMM_NULL, and we ignore the result there.
  // Passing key == 0 tells MPI to order the processes in the new
  // communicator by their rank in the old communicator.
  const int color = (NumMyElements() == 0) ? MPI_UNDEFINED : 1;

  // MPI_Comm_split must be called collectively over the original
  // communicator.  We can't just call it on the processes with color
  // one, even though we will ignore its result on the processes whose
  // color is MPI_UNDEFINED.
  int rv = MPI_Comm_split(MyMPIComm,color,0,&NewComm);
  if(rv!=MPI_SUCCESS) throw ReportError("Epetra_Map::RemoveEmptyProcesses: MPI_Comm_split failed.",-1);

  if(color == MPI_UNDEFINED)
    return 0; // We're not in the new map
  else {
    Epetra_MpiComm * NewEpetraComm = new Epetra_MpiComm(NewComm);

    // Build the new map with the copy constructor; its BlockMapData is
    // discarded and rebuilt from scratch below.
    Epetra_Map * NewMap = new Epetra_Map(*this);

    // Get rid of the old BlockMapData, now make a new one from scratch...
    NewMap->CleanupData();
    if(GlobalIndicesInt()) {
#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
      NewMap->BlockMapData_ = new Epetra_BlockMapData(NumGlobalElements(),0,IndexBase(),*NewEpetraComm,false);
#endif
    }
    else {
#ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
      NewMap->BlockMapData_ = new Epetra_BlockMapData(NumGlobalElements64(),0,IndexBase64(),*NewEpetraComm,true);
#endif
    }

    // Now copy all of the relevant bits of BlockMapData...
    //    NewMap->BlockMapData_->Comm_                    = NewEpetraComm;
    NewMap->BlockMapData_->LID_                     = BlockMapData_->LID_;
#ifndef EPETRA_NO_32BIT_GLOBAL_INDICES
    NewMap->BlockMapData_->MyGlobalElements_int_    = BlockMapData_->MyGlobalElements_int_;
#endif
#ifndef EPETRA_NO_64BIT_GLOBAL_INDICES
    NewMap->BlockMapData_->MyGlobalElements_LL_     = BlockMapData_->MyGlobalElements_LL_;
#endif
    NewMap->BlockMapData_->FirstPointInElementList_ = BlockMapData_->FirstPointInElementList_;
    NewMap->BlockMapData_->ElementSizeList_         = BlockMapData_->ElementSizeList_;
    NewMap->BlockMapData_->PointToElementList_      = BlockMapData_->PointToElementList_;

    NewMap->BlockMapData_->NumGlobalElements_       = BlockMapData_->NumGlobalElements_;
    NewMap->BlockMapData_->NumMyElements_           = BlockMapData_->NumMyElements_;
    NewMap->BlockMapData_->IndexBase_               = BlockMapData_->IndexBase_;
    NewMap->BlockMapData_->ElementSize_             = BlockMapData_->ElementSize_;
    NewMap->BlockMapData_->MinMyElementSize_        = BlockMapData_->MinMyElementSize_;
    NewMap->BlockMapData_->MaxMyElementSize_        = BlockMapData_->MaxMyElementSize_;
    NewMap->BlockMapData_->MinElementSize_          = BlockMapData_->MinElementSize_;
    NewMap->BlockMapData_->MaxElementSize_          = BlockMapData_->MaxElementSize_;
    NewMap->BlockMapData_->MinAllGID_               = BlockMapData_->MinAllGID_;
    NewMap->BlockMapData_->MaxAllGID_               = BlockMapData_->MaxAllGID_;
    NewMap->BlockMapData_->MinMyGID_                = BlockMapData_->MinMyGID_;
    NewMap->BlockMapData_->MaxMyGID_                = BlockMapData_->MaxMyGID_;
    NewMap->BlockMapData_->MinLID_                  = BlockMapData_->MinLID_;
    NewMap->BlockMapData_->MaxLID_                  = BlockMapData_->MaxLID_;
    NewMap->BlockMapData_->NumGlobalPoints_         = BlockMapData_->NumGlobalPoints_;
    NewMap->BlockMapData_->NumMyPoints_             = BlockMapData_->NumMyPoints_;
    NewMap->BlockMapData_->ConstantElementSize_     = BlockMapData_->ConstantElementSize_;
    NewMap->BlockMapData_->LinearMap_               = BlockMapData_->LinearMap_;
    NewMap->BlockMapData_->DistributedGlobal_       = NewEpetraComm->NumProc()==1 ? false : BlockMapData_->DistributedGlobal_;
    NewMap->BlockMapData_->OneToOneIsDetermined_    = BlockMapData_->OneToOneIsDetermined_;
    NewMap->BlockMapData_->OneToOne_                = BlockMapData_->OneToOne_;
    NewMap->BlockMapData_->GlobalIndicesInt_        = BlockMapData_->GlobalIndicesInt_;
    NewMap->BlockMapData_->GlobalIndicesLongLong_   = BlockMapData_->GlobalIndicesLongLong_;
    NewMap->BlockMapData_->LastContiguousGID_       = BlockMapData_->LastContiguousGID_;
    NewMap->BlockMapData_->LastContiguousGIDLoc_    = BlockMapData_->LastContiguousGIDLoc_;
    NewMap->BlockMapData_->LIDHash_                 = BlockMapData_->LIDHash_ ? new Epetra_HashTable<int>(*BlockMapData_->LIDHash_) : 0;

    // Delay directory construction
    NewMap->BlockMapData_->Directory_               = 0;

    // Cleanup
    delete NewEpetraComm;

    return NewMap;
  }
#else
    // MPI isn't compiled, so just treat this as a copy constructor
    return new Epetra_Map(*this);
#endif
}
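
// Self-contained illustration of the split semantics used above: every rank
// must call MPI_Comm_split collectively, and ranks passing MPI_UNDEFINED get
// MPI_COMM_NULL back (a sketch; `localCount` is a hypothetical stand-in for
// NumMyElements()):
//
//   MPI_Comm NewComm;
//   int color = (localCount == 0) ? MPI_UNDEFINED : 1;
//   MPI_Comm_split(MPI_COMM_WORLD, color, /*key=*/0, &NewComm);
//   if (NewComm == MPI_COMM_NULL) {
//     /* this rank owns nothing and is excluded from the new communicator */
//   }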
Example no. 8
std::pair<Teuchos::RCP<std::vector<std::size_t> >,
          Teuchos::RCP<std::vector<Teuchos::Tuple<double,3> > > >
getSideIdsAndCoords(const STK_Interface & mesh,
              const std::string & sideName, const std::string type_)
{
   Epetra_MpiComm Comm(mesh.getBulkData()->parallel());

   unsigned physicalDim = mesh.getDimension();
 
   // grab local IDs and coordinates on this side
   // and build local epetra vector
   //////////////////////////////////////////////////////////////////

   std::pair<Teuchos::RCP<std::vector<std::size_t> >,
             Teuchos::RCP<std::vector<Teuchos::Tuple<double,3> > > > sidePair =
          getLocalSideIdsAndCoords(mesh,sideName,type_);

   std::vector<std::size_t> & local_side_ids = *sidePair.first;
   std::vector<Teuchos::Tuple<double,3> > & local_side_coords = *sidePair.second;
   int nodeCount = local_side_ids.size();

   // build local Epetra objects
   Epetra_Map idMap(-1,nodeCount,0,Comm);
   Teuchos::RCP<Epetra_IntVector> localIdVec = Teuchos::rcp(new Epetra_IntVector(idMap));
   Teuchos::RCP<Epetra_MultiVector> localCoordVec = Teuchos::rcp(new Epetra_MultiVector(idMap,physicalDim));

   // copy local Ids into Epetra vector
   for(std::size_t n=0;n<local_side_ids.size();n++) {
      std::size_t nodeId = local_side_ids[n];
      Teuchos::Tuple<double,3> & coords = local_side_coords[n];

      (*localIdVec)[n] = nodeId;
      for(unsigned d=0;d<physicalDim;d++)
         (*(*localCoordVec)(d))[n] = coords[d];
   }

   // fully distribute epetra vector across all processors 
   // (these are "distributed" or "dist" objects)
   //////////////////////////////////////////////////////////////

   int dist_nodeCount = idMap.NumGlobalElements();

   // build global epetra objects
   Epetra_LocalMap distMap(dist_nodeCount,0,Comm);
   Teuchos::RCP<Epetra_IntVector> distIdVec = Teuchos::rcp(new Epetra_IntVector(distMap));
   Teuchos::RCP<Epetra_MultiVector> distCoordVec = Teuchos::rcp(new Epetra_MultiVector(distMap,physicalDim));

   // import the locally owned data into the distributed ("dist") objects
   Epetra_Import importer(distMap,idMap);
   TEUCHOS_ASSERT(distIdVec->Import(*localIdVec,importer,Insert)==0);
   TEUCHOS_ASSERT(distCoordVec->Import(*localCoordVec,importer,Insert)==0);

   // convert back to generic stl vector objects
   ///////////////////////////////////////////////////////////

   Teuchos::RCP<std::vector<std::size_t> > dist_side_ids
      = Teuchos::rcp(new std::vector<std::size_t>(dist_nodeCount));
   Teuchos::RCP<std::vector<Teuchos::Tuple<double,3> > > dist_side_coords
      = Teuchos::rcp(new std::vector<Teuchos::Tuple<double,3> >(dist_nodeCount));

   // copy the gathered Ids and coordinates back into the STL containers
   for(std::size_t n=0;n<dist_side_ids->size();n++) {
      (*dist_side_ids)[n] = (*distIdVec)[n];

      Teuchos::Tuple<double,3> & coords = (*dist_side_coords)[n];
      for(unsigned d=0;d<physicalDim;d++)
         coords[d] = (*(*distCoordVec)(d))[n];
   }

   return std::make_pair(dist_side_ids,dist_side_coords);
}
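
// The idMap -> distMap Import above is the standard Epetra all-gather idiom:
// target every global element on every process with an Epetra_LocalMap, then
// Import from the uniquely owned map.  A compressed sketch (assuming a
// distributed Epetra_Vector `owned` built on a hypothetical map `ownedMap`):
//
//   Epetra_LocalMap allMap(ownedMap.NumGlobalElements(), 0, Comm);
//   Epetra_Vector   replicated(allMap);
//   Epetra_Import   gather(allMap, ownedMap);
//   replicated.Import(owned, gather, Insert); // every rank now holds all entries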
Example no. 9
int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  bool verbose = (Comm.MyPID() == 0);

  Teuchos::ParameterList GaleriList;
  const int nx = 30;
  GaleriList.set("n", nx * nx);
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx);
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap64("Linear", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_RowMatrix> Matrix = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );

  Teuchos::ParameterList List, DefaultList;

  // test the preconditioner
  bool TestPassed = true;

  if (!Test<Ifpack_Chebyshev>(Matrix,List)) 
  {
    TestPassed = false;
  }

  List.set("polynomial: degree",3);
  if (!Test<Ifpack_Polynomial>(Matrix,List))
  {
    TestPassed = false;
  }

  List = DefaultList;
  List.set("krylov: tolerance", 1e-14);
  List.set("krylov: iterations", 100);
  List.set("krylov: preconditioner", 2);
  List.set("krylov: block size", 9);
  List.set("krylov: number of sweeps", 2);
  if (!Test<Ifpack_Krylov>(Matrix,List))
  {
    TestPassed = false;
  }

  if (!Test< Ifpack_AdditiveSchwarz<Ifpack_Krylov> >(Matrix,List)) {
    TestPassed = false;
  }

  if (!Test<Ifpack_Amesos>(Matrix,List)) 
  {
    TestPassed = false;
  }

  // FIXME
#if 0
  if (!Test<Ifpack_AdditiveSchwarz<Ifpack_BlockRelaxation<Ifpack_DenseContainer> > >(Matrix,List)) {
    TestPassed = false;
  }
#endif

  // this is ok as long as just one sweep is applied
  List = DefaultList;
  List.set("relaxation: type", "Gauss-Seidel");
  if (!Test<Ifpack_PointRelaxation>(Matrix,List)) {
    TestPassed = false;
  }

  // this is ok as long as just one sweep is applied
  List = DefaultList;
  List.set("relaxation: type", "symmetric Gauss-Seidel");
  List.set("relaxation: sweeps", 5);
  List.set("partitioner: local parts", 128);
  List.set("partitioner: type", "linear");
  if (!Test<Ifpack_BlockRelaxation<Ifpack_DenseContainer> >(Matrix,List)) {
    TestPassed = false;
  }
 
  // this is ok as long as just one sweep is applied
  List = DefaultList;
  List.set("relaxation: type", "symmetric Gauss-Seidel");
  List.set("partitioner: local parts", 128);
  List.set("partitioner: type", "linear");
  if (!Test<Ifpack_BlockRelaxation<Ifpack_SparseContainer<Ifpack_Amesos> > >(Matrix,List)) {
    TestPassed = false;
  }

  // this is ok as long as just one sweep is applied
  List = DefaultList;
  List.set("relaxation: type", "symmetric Gauss-Seidel");
  List.set("partitioner: local parts", 128);
  List.set("partitioner: type", "linear");
  if (!Test<Ifpack_AdditiveSchwarz<Ifpack_BlockRelaxation<Ifpack_SparseContainer<Ifpack_Amesos> > > >(Matrix,List)) {
    TestPassed = false;
  }
  if (!TestPassed) {
    cerr << "Test `TestAll.exe' FAILED!" << endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    exit(EXIT_FAILURE);
  }

#ifdef HAVE_MPI
  MPI_Finalize(); 
#endif
  if (verbose)
    cout << "Test `TestAll.exe' passed!" << endl;

  return(EXIT_SUCCESS);
}
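
// Each Test<T>(Matrix, List) call above presumably drives the standard
// Ifpack preconditioner life cycle.  A minimal sketch of that cycle for one
// concrete type (standard Ifpack calls; the Test<> harness itself is defined
// elsewhere in this test):
//
//   Ifpack_Chebyshev Prec(&*Matrix);
//   IFPACK_CHK_ERR(Prec.SetParameters(List));
//   IFPACK_CHK_ERR(Prec.Initialize());  // symbolic setup
//   IFPACK_CHK_ERR(Prec.Compute());     // numeric setup
//   // Prec.ApplyInverse(X, Y) is now usable inside a Krylov solver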
Example no. 10
int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif
  
  bool verbose = (Comm.MyPID() == 0);

  // set global dimension to 5, could be any number
  int NumGlobalElements = 5;
  // create a map
  Epetra_Map Map(NumGlobalElements,0,Comm);
  // local number of rows
  int NumMyElements = Map.NumMyElements();
  // get update list
  int * MyGlobalElements = Map.MyGlobalElements( );

  // ============= CONSTRUCTION OF THE MATRIX ===========================
  // Create a Epetra_Matrix

  Epetra_CrsMatrix A(Copy,Map,3);

  // Add  rows one-at-a-time

  double *Values = new double[2];
  Values[0] = -1.0; Values[1] = -1.0;
  int *Indices = new int[2];
  double two = 2.0;
  int NumEntries;

  for( int i=0 ; i<NumMyElements; ++i ) {
    if (MyGlobalElements[i]==0) {
	Indices[0] = 1;
	NumEntries = 1;
    } else if (MyGlobalElements[i] == NumGlobalElements-1) {
      Indices[0] = NumGlobalElements-2;
      NumEntries = 1;
    } else {
      Indices[0] = MyGlobalElements[i]-1;
      Indices[1] = MyGlobalElements[i]+1;
      NumEntries = 2;
    }
    A.InsertGlobalValues(MyGlobalElements[i], NumEntries, Values, Indices);
    // Put in the diagonal entry
    A.InsertGlobalValues(MyGlobalElements[i], 1, &two, MyGlobalElements+i);
  }
  
  // Finish up
  A.FillComplete();

  // ================ CONSTRUCTION OF VECTORS =======================
  
  // build up two distributed vectors q and z, and compute
  // q = A * z
  Epetra_Vector q(A.RowMap());
  Epetra_Vector z(A.RowMap());

  // Fill z with 1's
  z.PutScalar( 1.0 );

  // ================ USE OF TIME AND FLOPS =========================
  
  Epetra_Flops counter;
  A.SetFlopCounter(counter);
  Epetra_Time timer(Comm);
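  // Sharing one Epetra_Flops counter: every object registered via
  // SetFlopCounter() accumulates its operation count into `counter`, so the
  // matrix-vector and dot-product costs below can be read from one place.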

  A.Multiply(false, z, q); // Compute q = A*z

  double elapsed_time = timer.ElapsedTime();
  double total_flops =counter.Flops();
  if (verbose)
    cout << "Total ops: " << total_flops << endl;
  double MFLOPs = total_flops/elapsed_time/1000000.0;
  if (verbose)
    cout << "Total MFLOPs  for mat-vec = " << MFLOPs << endl<< endl;

  double dotProduct;
  z.SetFlopCounter(counter);
  timer.ResetStartTime();
  z.Dot(q, &dotProduct);

  total_flops =counter.Flops();
  if (verbose)
    cout << "Total ops: " << total_flops << endl;

  elapsed_time = timer.ElapsedTime();
  if (elapsed_time != 0.0)
    MFLOPs = (total_flops / elapsed_time) / 1000000.0;
  else
    MFLOPs = 0;

  if (verbose)
  {
    cout << "Total MFLOPs for vec-vec = " << MFLOPs << endl<< endl;
    cout << "q dot z = " << dotProduct << endl;
  }

  // Release the arrays allocated for matrix construction
  delete [] Values;
  delete [] Indices;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return( 0 );

} /* main */
Example no. 11
int main(int argc, char *argv[])
{
  int n = 10;
  int ierr = 0;
  double reltol = 1.0e-14;
  double abstol = 1.0e-14;
  int MyPID = 0;

  try {

    // Initialize MPI
#ifdef HAVE_MPI
    MPI_Init(&argc,&argv);
#endif

    // Create a communicator for Epetra objects
#ifdef HAVE_MPI
    Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
    Epetra_SerialComm Comm;
#endif

    MyPID = Comm.MyPID();

    // Create the map
    Epetra_Map map(n, 0, Comm);

    bool verbose = false;
    // Check for verbose output
    if (argc>1)
      if (argv[1][0]=='-' && argv[1][1]=='v')
        verbose = true;

    // Seed the random number generator in Teuchos.  We create random
    // bordering matrices, and without a fixed seed different processors
    // might generate different matrices.  Setting the seed prevents that.
    Teuchos::ScalarTraits<double>::seedrandom(12345);

    // Create and initialize the parameter vector
    LOCA::ParameterVector pVector;
    pVector.addParameter("Param 1",  1.69);
    pVector.addParameter("Param 2", -9.7);
    pVector.addParameter("Param 3",  0.35);
    pVector.addParameter("Param 4", -0.78);
    pVector.addParameter("Param 5",  2.53);

    // Create parameter list
    Teuchos::RCP<Teuchos::ParameterList> paramList =
      Teuchos::rcp(new Teuchos::ParameterList);

    Teuchos::ParameterList& nlParams = paramList->sublist("NOX");
    Teuchos::ParameterList& nlPrintParams = nlParams.sublist("Printing");
    nlPrintParams.set("MyPID", MyPID);
    if (verbose)
      nlPrintParams.set("Output Information",
                        NOX::Utils::Error +
                        NOX::Utils::Details +
                        NOX::Utils::OuterIteration +
                        NOX::Utils::InnerIteration +
                        NOX::Utils::Warning +
                        NOX::Utils::TestDetails +
                        NOX::Utils::StepperIteration +
                        NOX::Utils::StepperDetails);
    else
      nlPrintParams.set("Output Information", NOX::Utils::Error);

    // Create global data object
    Teuchos::RCP<LOCA::GlobalData> globalData =
      LOCA::createGlobalData(paramList);

    Epetra_Vector clone_vec(map);
    NOX::Epetra::Vector nox_clone_vec(clone_vec);

    Teuchos::RCP<NOX::Abstract::Vector> x =
      nox_clone_vec.clone(NOX::ShapeCopy);
    x->random();

    Teuchos::RCP<NOX::Abstract::MultiVector> dx1 =
      nox_clone_vec.createMultiVector(3);
    Teuchos::RCP<NOX::Abstract::MultiVector> dx2 =
      nox_clone_vec.createMultiVector(1);
    Teuchos::RCP<NOX::Abstract::MultiVector> dx3 =
      nox_clone_vec.createMultiVector(2);
    Teuchos::RCP<NOX::Abstract::MultiVector> dx4 =
      nox_clone_vec.createMultiVector(2);
    dx1->random();
    dx2->random();
    dx3->init(0.0);
    dx4->random();

    Teuchos::RCP<NOX::Abstract::MultiVector> dx_all =
      dx1->clone(NOX::DeepCopy);
    dx_all->augment(*dx2);
    dx_all->augment(*dx3);
    dx_all->augment(*dx4);

    NOX::Abstract::MultiVector::DenseMatrix dp1(dx1->numVectors(),
                        pVector.length());
    NOX::Abstract::MultiVector::DenseMatrix dp2(dx2->numVectors(),
                        pVector.length());
    NOX::Abstract::MultiVector::DenseMatrix dp3(dx3->numVectors(),
                        pVector.length());
    NOX::Abstract::MultiVector::DenseMatrix dp4(dx4->numVectors(),
                        pVector.length());
    dp1.random();
    dp2.random();
    dp3.random();
    dp4.random();

    NOX::Abstract::MultiVector::DenseMatrix dp_all(dx_all->numVectors(),
                           pVector.length());
    for (int j=0; j<dp_all.numCols(); j++) {
      for (int i=0; i<dp1.numRows(); i++)
        dp_all(i,j) = dp1(i,j);
      for (int i=0; i<dp2.numRows(); i++)
        dp_all(dp1.numRows()+i,j) = dp2(i,j);
      for (int i=0; i<dp3.numRows(); i++)
        dp_all(dp1.numRows()+dp2.numRows()+i,j) = dp3(i,j);
      for (int i=0; i<dp4.numRows(); i++)
        dp_all(dp1.numRows()+dp2.numRows()+dp3.numRows()+i,j) = dp4(i,j);
    }


    std::vector< Teuchos::RCP<LOCA::MultiContinuation::ConstraintInterface> > constraintObjs(4);
    Teuchos::RCP<LinearConstraint> linear_constraint;

    linear_constraint = Teuchos::rcp(new LinearConstraint(dx1->numVectors(),
                              pVector,
                              nox_clone_vec));
    linear_constraint->setDgDx(*dx1);
    linear_constraint->setDgDp(dp1);
    linear_constraint->setIsZeroDX(false);
    constraintObjs[0] = linear_constraint;

    linear_constraint = Teuchos::rcp(new LinearConstraint(dx2->numVectors(),
                              pVector,
                              nox_clone_vec));
    linear_constraint->setDgDx(*dx2);
    linear_constraint->setDgDp(dp2);
    linear_constraint->setIsZeroDX(false);
    constraintObjs[1] = linear_constraint;

    linear_constraint = Teuchos::rcp(new LinearConstraint(dx3->numVectors(),
                              pVector,
                              nox_clone_vec));
    linear_constraint->setDgDx(*dx3);
    linear_constraint->setDgDp(dp3);
    linear_constraint->setIsZeroDX(true);
    constraintObjs[2] = linear_constraint;

    linear_constraint = Teuchos::rcp(new LinearConstraint(dx4->numVectors(),
                              pVector,
                              nox_clone_vec));
    linear_constraint->setDgDx(*dx4);
    linear_constraint->setDgDp(dp4);
    linear_constraint->setIsZeroDX(false);
    constraintObjs[3] = linear_constraint;

    // Check some statistics on the solution
    NOX::TestCompare testCompare(globalData->locaUtils->out(),
                 *(globalData->locaUtils));

    LOCA::MultiContinuation::CompositeConstraint composite(globalData,
                               constraintObjs);
    composite.setX(*x);

    LinearConstraint combined(dx_all->numVectors(), pVector, nox_clone_vec);
    combined.setDgDx(*dx_all);
    combined.setDgDp(dp_all);
    combined.setX(*x);

    //
    // test computeConstraints()
    //

    composite.computeConstraints();
    combined.computeConstraints();

    int numConstraints = dx_all->numVectors();
    const NOX::Abstract::MultiVector::DenseMatrix& g_composite =
      composite.getConstraints();
    const NOX::Abstract::MultiVector::DenseMatrix& g_combined =
      combined.getConstraints();

    ierr += testCompare.testMatrix(
                 g_composite, g_combined, reltol, abstol,
                 "CompositeConstraint::computeConstraints()");

    //
    // test computeDP()
    //

    std::vector<int> paramIDs(3);
    paramIDs[0] = 1;
    paramIDs[1] = 2;
    paramIDs[2] = 4;
    NOX::Abstract::MultiVector::DenseMatrix dgdp_composite(
                            numConstraints,
                            paramIDs.size()+1);
    NOX::Abstract::MultiVector::DenseMatrix dgdp_combined(
                            numConstraints,
                            paramIDs.size()+1);
    dgdp_composite.putScalar(0.0);
    dgdp_combined.putScalar(0.0);
    composite.computeDP(paramIDs, dgdp_composite, false);
    combined.computeDP(paramIDs, dgdp_combined, false);

    ierr += testCompare.testMatrix(
                 dgdp_composite, dgdp_combined, reltol, abstol,
                 "CompositeConstraint::computeDP()");

    //
    // test multiplyDX()
    //

    composite.computeDX();
    combined.computeDX();

    int numMultiply = 5;
    Teuchos::RCP<NOX::Abstract::MultiVector> A =
      nox_clone_vec.createMultiVector(numMultiply);
    A->random();
    NOX::Abstract::MultiVector::DenseMatrix composite_multiply(numConstraints,
                                   numMultiply);
    NOX::Abstract::MultiVector::DenseMatrix combined_multiply(numConstraints,
                                  numMultiply);
    composite.multiplyDX(2.65, *A, composite_multiply);
    combined.multiplyDX(2.65, *A, combined_multiply);

    ierr += testCompare.testMatrix(composite_multiply, combined_multiply,
                    reltol, abstol,
                    "CompositeConstraint::multiplyDX()");

    //
    // test addDX() (No Trans)
    //

    int numAdd = 5;
    NOX::Abstract::MultiVector::DenseMatrix B1(numConstraints, numAdd);
    B1.random();
    NOX::Abstract::MultiVector::DenseMatrix B2(numAdd, numConstraints);
    B2.random();

    Teuchos::RCP<NOX::Abstract::MultiVector> composite_add1 =
      nox_clone_vec.createMultiVector(numAdd);
    composite_add1->random();
    Teuchos::RCP<NOX::Abstract::MultiVector> composite_add2 =
      nox_clone_vec.createMultiVector(numAdd);
    composite_add2->random();

    Teuchos::RCP<NOX::Abstract::MultiVector> combined_add1 =
      composite_add1->clone(NOX::DeepCopy);
    Teuchos::RCP<NOX::Abstract::MultiVector> combined_add2 =
      composite_add2->clone(NOX::DeepCopy);

    composite.addDX(Teuchos::NO_TRANS, 1.45, B1, 2.78, *composite_add1);
    combined.addDX(Teuchos::NO_TRANS, 1.45, B1, 2.78, *combined_add1);

    ierr += testCompare.testMultiVector(
                   *composite_add1, *combined_add1,
                   reltol, abstol,
                   "CompositeConstraint::addDX() (No Trans)");

    //
    // test addDX() (Trans)
    //

    composite.addDX(Teuchos::TRANS, 1.45, B2, 2.78, *composite_add2);
    combined.addDX(Teuchos::TRANS, 1.45, B2, 2.78, *combined_add2);

    ierr += testCompare.testMultiVector(
                   *composite_add2, *combined_add2,
                   reltol, abstol,
                   "CompositeConstraint::addDX() (Trans)");

    LOCA::destroyGlobalData(globalData);
  }

  catch (std::exception& e) {
    std::cout << e.what() << std::endl;
    ierr = 1;
  }
  catch (const char *s) {
    std::cout << s << std::endl;
    ierr = 1;
  }
  catch (...) {
    std::cout << "Caught unknown exception!" << std::endl;
    ierr = 1;
  }

  if (MyPID == 0) {
    if (ierr == 0)
      std::cout << "All tests passed!" << std::endl;
    else
      std::cout << ierr << " test(s) failed!" << std::endl;
  }

#ifdef HAVE_MPI
  MPI_Finalize() ;
#endif

  return ierr;
}
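A note on the dp_all assembly above: it stacks several dense blocks row-wise into a single matrix. Below is a minimal standalone sketch of the same pattern using Teuchos::SerialDenseMatrix directly (NOX's DenseMatrix type is a Teuchos serial dense matrix under the hood); the block sizes are illustrative assumptions, not values from the test.

#include "Teuchos_SerialDenseMatrix.hpp"
#include <iostream>

int main()
{
  // Two blocks that share the same number of columns.
  Teuchos::SerialDenseMatrix<int,double> B1(3, 5), B2(2, 5);
  B1.random();
  B2.random();

  // Stack them row-wise into a (3+2)-by-5 matrix, as done for dp_all above.
  Teuchos::SerialDenseMatrix<int,double> Ball(B1.numRows() + B2.numRows(), 5);
  for (int j = 0; j < Ball.numCols(); j++) {
    for (int i = 0; i < B1.numRows(); i++)
      Ball(i, j) = B1(i, j);
    for (int i = 0; i < B2.numRows(); i++)
      Ball(B1.numRows() + i, j) = B2(i, j);
  }

  std::cout << "Stacked matrix is " << Ball.numRows()
            << " x " << Ball.numCols() << std::endl;
  return 0;
}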
// ============================================================================
// Visualize aggregates and (for XYZ or VTK format) also plot vectors
// date: Aug-04
int ML_Epetra::MultiLevelPreconditioner::
Visualize(bool VizAggre, bool VizPreSmoother,
      bool VizPostSmoother, bool VizCycle,
      int NumApplPreSmoother, int NumApplPostSmoother,
      int NumCycleSmoother)
{
  ML_Aggregate *aggregates = agg_;

  char filename[80] = "";
  int NumDimensions = 0;
  ML_Aggregate_Viz_Stats *grid_info =
        (ML_Aggregate_Viz_Stats *) ml_->Grid[LevelID_[0]].Grid;

  double * x_coord = grid_info->x;
  double * y_coord = grid_info->y;
  double * z_coord = grid_info->z;

  if( x_coord ) NumDimensions++;
  if( y_coord ) NumDimensions++;
  if( z_coord ) NumDimensions++;

  assert( NumDimensions != 0 );

  if (VizAggre == true) {

    // stats about level matrix sizes
    if( verbose_ )
      std::cout << std::endl << "- number of rows for each level's matrix:" << std::endl << std::endl;

    for( int ilevel=0 ; ilevel < NumLevels_ ; ++ilevel ) {
      int imin, iavg, imax;
      int Nrows = ml_->Amat[LevelID_[ilevel]].outvec_leng/NumPDEEqns_;
      Comm().MinAll(&Nrows,&imin,1);
      Comm().MaxAll(&Nrows,&imax,1);
      Comm().SumAll(&Nrows,&iavg,1); iavg /= Comm().NumProc();

      if( verbose_ ) {
        printf( "\t(level %d) rows per process (min) = %d\n", ilevel, imin);
        printf( "\t(level %d) rows per process (avg) = %d\n", ilevel, iavg);
        printf( "\t(level %d) rows per process (max) = %d\n", ilevel, imax);
        std::cout << std::endl;
      }
    }

    if( verbose_ )
      std::cout << std::endl << "- analysis of the computational domain (finest level):"
                << std::endl << std::endl;
    ML_Aggregate_Stats_Analyze(ml_,aggregates);

  }

  // prepare output format. Now it can be:
  // - OpenDX (1D/2D/3D)
  // - XD3D (2D only)
  // - Paraview, or any other package that can read .vtk files (1D/2D/3D)

  int Format;
  std::string FileFormat = List_.get("viz: output format", "vtk");
  // you are a cool guy if you plot with "xyz"
  if( FileFormat == "xyz" ) Format = 1;
  // you are a poor man if you need "dx". God bless you.
  else if( FileFormat == "dx" ) Format = 0;
  // you are a very cool guy if you plot with the "vtk" option (paraview)
  else if( FileFormat == "vtk" ) Format = 2;
  else {
    std::cerr << ErrorMsg_ << "Option `viz: output format' has an incorrect" << std::endl
      << ErrorMsg_ << "value (" << FileFormat << "). Possible values are" << std::endl
      << ErrorMsg_ << "<dx> / <xyz> / <vtk>" << std::endl;
    exit( EXIT_FAILURE );
  }

  int ieqn             = List_.get("viz: equation to plot", -1);
  if (AMGSolver_ == ML_MAXWELL) ieqn = -1;
  if( ieqn >= NumPDEEqns_ ) ieqn = 0;
  bool PrintStarting   = List_.get("viz: print starting solution", false);

  ML_Smoother * ptr;
  double * tmp_rhs = new double[NumMyRows()];
  double * tmp_sol = new double[NumMyRows()];
  double * plot_me = new double[NumMyRows()/NumPDEEqns_];

  // Note that this requires the new version of the
  // visualization routines. OpenDX cannot visualize vectors.

  if( ( VizPreSmoother || VizPostSmoother || VizCycle ) && ( Format == 0) ) {
    std::cerr << std::endl;
    std::cerr << ErrorMsg_ << "Option `viz: output format' == `dx' cannot be used"
         << std::endl << ErrorMsg_
         << "to visualize the effect of smoothers and cycle." << std::endl;
    std::cerr << std::endl;
    VizPreSmoother = false;
    VizPostSmoother = false;
    VizCycle = false;
  }

  if( verbose_ )
    std::cout << std::endl << "- visualization:" << std::endl << std::endl;

  // =============================================================== //
  // cycle over all levels. Note that almost the same thing          //
  // is done for pre-smoothing, post-smoothing, and the effect       //
  // of the cycle itself. For each of these, I plot on file          //
  // the starting solution (before-), the final solution (after-),   //
  // for each equation, and for each level (smoother only).          //
  // All this junk works with XYZ only, and it should work in        //
  // 3D too (although I never tested it in 3D).                      //
  //                                                                 //
  // JJH 3/11/2005 Paraview has been tested in 3D for .vtk output,   //
  // and it works.                                                   //
  // =============================================================== //

  std::cout << "cycling thru levels 0 to " << NumLevels_ -1 << std::endl;

  for( int ilevel=0 ; ilevel<NumLevels_ ; ++ilevel ) {

    // =================== //
    // plot the aggregates //
    // =================== //


    if( VizAggre )
      ML_Aggregate_Viz(ml_,aggregates,Format,NULL,NULL,LevelID_[ilevel]);

    // ============ //
    // pre-smoother //
    // ============ //

    ptr = ((ml_->SingleLevel[LevelID_[ilevel]]).pre_smoother);

    if( ptr != NULL && VizPreSmoother ) {

      RandomAndZero(tmp_sol,tmp_rhs,ml_->Amat[LevelID_[ilevel]].outvec_leng);

      // visualize starting vector
      if( PrintStarting ) {
        if( ieqn != -1 ) {
          for( int i=0 ; i<NumMyRows() ; i+=NumPDEEqns_ )
            plot_me[i/NumPDEEqns_] = tmp_sol[i+ieqn];
          sprintf(filename,"before-presmoother-eq%d", ieqn);
            printf("%s, numrows = %d\n",filename, NumMyRows());
          ML_Aggregate_Viz(ml_,aggregates,Format,plot_me,
                           filename,LevelID_[ilevel]);
        } else { // by default, print out all equations
          for( int eq=0 ; eq<NumPDEEqns_ ; eq++ ) {
            sprintf(filename,"before-presmoother-eq%d", eq);
            printf("%s, numrows = %d\n",filename, NumMyRows());
            for( int i=0 ; i<NumMyRows() ; i+=NumPDEEqns_ ) {
              plot_me[i/NumPDEEqns_] = tmp_sol[i+eq];
              //FIXME JJH temporary print
              //printf("(eq %d, %d) %d: %lf\n",eq,LevelID_[ilevel],i,tmp_sol[i+eq]);
            }
            ML_Aggregate_Viz(ml_,aggregates,Format,plot_me,
                             filename,LevelID_[ilevel]);
          }
        }
      }

      // increase the number of applications of the smoother
      // and run the smoother
      int old_ntimes = ptr->ntimes;
      ptr->ntimes = NumApplPreSmoother;
      ML_Smoother_Apply(ptr,
            ml_->Amat[LevelID_[ilevel]].outvec_leng,
            tmp_sol,
            ml_->Amat[LevelID_[ilevel]].outvec_leng,
            tmp_rhs, ML_NONZERO);
      ptr->ntimes = old_ntimes;

      // visualize
      // user may have required one specific equation only
      if( ieqn != -1 ) {
        for( int i=0 ; i<NumMyRows() ; i+=NumPDEEqns_ )
          plot_me[i/NumPDEEqns_] = tmp_sol[i+ieqn];
        sprintf(filename,"after-presmoother-eq%d", ieqn);
        ML_Aggregate_Viz(ml_,aggregates,Format,plot_me,
                         filename,LevelID_[ilevel]);
      } else { // by default, print out all equations
        for( int eq=0 ; eq<NumPDEEqns_ ; eq++ ) {
          for( int i=0 ; i<NumMyRows() ; i+=NumPDEEqns_ )
            plot_me[i/NumPDEEqns_] = tmp_sol[i+eq];
          sprintf(filename,"after-presmoother-eq%d", eq);
          ML_Aggregate_Viz(ml_,aggregates,Format,plot_me,
                           filename,LevelID_[ilevel]);
        }
      }
    } // VizPreSmoother

    // ============= //
    // post-smoother //
    // ============= //

    ptr = ((ml_->SingleLevel[LevelID_[ilevel]]).post_smoother);
    if( ptr != NULL && VizPostSmoother ) {

      // random solution and 0 rhs
      RandomAndZero(tmp_sol,tmp_rhs,ml_->Amat[LevelID_[ilevel]].outvec_leng);

      // visualize starting vector
      if( PrintStarting ) {
        if( ieqn != -1 ) {
          for( int i=0 ; i<NumMyRows() ; i+=NumPDEEqns_ )
            plot_me[i/NumPDEEqns_] = tmp_sol[i+ieqn];
          sprintf(filename,"before-postsmoother-eq%d", ieqn);
          ML_Aggregate_Viz(ml_,aggregates,Format,plot_me,
                           filename,LevelID_[ilevel]);
        } else { // by default, print out all equations
          for( int eq=0 ; eq<NumPDEEqns_ ; eq++ ) {
            for( int i=0 ; i<NumMyRows() ; i+=NumPDEEqns_ )
              plot_me[i/NumPDEEqns_] = tmp_sol[i+eq];
            sprintf(filename,"before-postsmoother-eq%d", eq);
            ML_Aggregate_Viz(ml_,aggregates,Format,plot_me,
                             filename,LevelID_[ilevel]);
          }
        }
      }

      // increase the number of applications of the smoother
      // and run the smoother
      int old_ntimes = ptr->ntimes;
      ptr->ntimes = NumApplPostSmoother;
      ML_Smoother_Apply(ptr,
            ml_->Amat[LevelID_[ilevel]].outvec_leng,
            tmp_sol,
            ml_->Amat[LevelID_[ilevel]].outvec_leng,
            tmp_rhs, ML_ZERO);
      ptr->ntimes = old_ntimes;

      // visualize
      // user may have required one specific equation only
      if( ieqn != -1 ) {
        for( int i=0 ; i<NumMyRows() ; i+=NumPDEEqns_ )
          plot_me[i/NumPDEEqns_] = tmp_sol[i+ieqn];
        sprintf(filename,"after-postsmoother-eq%d", ieqn);
        ML_Aggregate_Viz(ml_,aggregates,Format,plot_me,
                         filename,LevelID_[ilevel]);
      } else { // by default, print out all equations
        for( int eq=0 ; eq<NumPDEEqns_ ; eq++ ) {
          for( int i=0 ; i<NumMyRows() ; i+=NumPDEEqns_ )
            plot_me[i/NumPDEEqns_] = tmp_sol[i+eq];
          sprintf(filename,"after-postsmoother-eq%d", eq);
          ML_Aggregate_Viz(ml_,aggregates,Format,plot_me,
                           filename,LevelID_[ilevel]);
        }
      }
    } // VizPostSmoother
  } // for( ilevel )

  // =============================== //
  // run ML cycle on a random vector //
  // =============================== //

  if( VizCycle ) {

    // random solution and zero rhs
    RandomAndZero(tmp_sol, tmp_rhs,ml_->Amat[LevelID_[0]].outvec_leng);

    // visualize starting vector
    if( PrintStarting ) {
      if( ieqn != -1 ) {
        for( int i=0 ; i<NumMyRows() ; i+=NumPDEEqns_ )
          plot_me[i/NumPDEEqns_] = tmp_sol[i+ieqn];
        sprintf(filename,"before-cycle-eq%d", ieqn);
        ML_Aggregate_Viz(ml_,aggregates,Format,plot_me,filename,LevelID_[0]);
      } else { // by default, print out all equations
        for( int eq=0 ; eq<NumPDEEqns_ ; eq++ ) {
          for( int i=0 ; i<NumMyRows() ; i+=NumPDEEqns_ )
            plot_me[i/NumPDEEqns_] = tmp_sol[i+eq];
          sprintf(filename,"before-cycle-eq%d", eq);
          ML_Aggregate_Viz(ml_,aggregates,Format,plot_me,
                           filename,LevelID_[0]);
        }
      }
    }

    // run the cycle
    for( int i=0 ; i<NumCycleSmoother ; ++i )
      ML_Cycle_MG(&(ml_->SingleLevel[ml_->ML_finest_level]),
          tmp_sol, tmp_rhs, ML_NONZERO, ml_->comm, ML_NO_RES_NORM, ml_);

    // visualize
    // user may have required one specific equation only
    if( ieqn != -1 ) {
      for( int i=0 ; i<NumMyRows() ; i+=NumPDEEqns_ )
        plot_me[i/NumPDEEqns_] = tmp_sol[i+ieqn];
      sprintf(filename,"after-cycle-eq%d", ieqn);
      ML_Aggregate_Viz(ml_,aggregates,Format,plot_me,filename,LevelID_[0]);
    } else { // by default, print out all equations
      for( int eq=0 ; eq<NumPDEEqns_ ; eq++ ) {
        for( int i=0 ; i<NumMyRows() ; i+=NumPDEEqns_ )
          plot_me[i/NumPDEEqns_] = tmp_sol[i+eq];
        sprintf(filename,"after-cycle-eq%d", eq);
        ML_Aggregate_Viz(ml_,aggregates,Format,plot_me,filename,LevelID_[0]);
      }
    }
  } // VizCycle

  // =================== //
  // clean up and return //
  // =================== //

  delete [] tmp_sol;
  delete [] tmp_rhs;
  delete [] plot_me;

  return(0);
}
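Visualize() is driven entirely by "viz:" entries in the preconditioner's parameter list. The sketch below shows how a caller might set the three options read in the function body; any other visualization settings are assumptions left at their defaults.

#include "Teuchos_ParameterList.hpp"

// Sketch: fill in the "viz:" options consumed by Visualize() above.
void SetVizOptions(Teuchos::ParameterList& MLList)
{
  MLList.set("viz: output format", "vtk");          // "xyz", "dx", or "vtk"
  MLList.set("viz: equation to plot", -1);          // -1 = plot all equations
  MLList.set("viz: print starting solution", true); // also dump the initial vector
}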
int main(int argc, char *argv[])
{
  // initialize MPI and Epetra communicator
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  Teuchos::ParameterList GaleriList;

  // The problem is defined on a 2D grid; global size is nx * (nx * Comm.NumProc()).
  int nx = 30; 
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx * Comm.NumProc());
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap64("Cartesian2D", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_RowMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );

  // =============================================================== //
  // B E G I N N I N G   O F   I F P A C K   C O N S T R U C T I O N //
  // =============================================================== //

  Teuchos::ParameterList List;

  // builds an Ifpack_AdditiveSchwarz. This is templated with
  // the local solver, in this case Ifpack_BlockRelaxation.
  // Ifpack_BlockRelaxation requires a container class as a
  // template parameter. A container defines how to store the
  // diagonal blocks. Two choices are available:
  // Ifpack_DenseContainer (to store them as dense blocks,
  // then use LAPACK's factorization to apply the inverse of
  // each block), or Ifpack_SparseContainer (to store
  // the diagonal blocks as Epetra_CrsMatrix's).
  // 
  // Here, we use Ifpack_SparseContainer, which in turn is
  // templated with the class to use to apply the inverse
  // of each block. For example, we can use Ifpack_Amesos.
 
  // We still have to decide the overlap among the processes,
  // and the overlap among the blocks. The two values
  // can be different. The overlap among the blocks is
  // considered only if block Jacobi is used.
  int OverlapProcs = 2;
  int OverlapBlocks = 0;

  // define the block below to use dense containers
#if 0
  Ifpack_AdditiveSchwarz<Ifpack_BlockRelaxation<Ifpack_DenseContainer> > Prec(A, OverlapProcs);
#else
  Ifpack_AdditiveSchwarz<Ifpack_BlockRelaxation<Ifpack_SparseContainer<Ifpack_Amesos> > > Prec(&*A, OverlapProcs);
#endif

  List.set("relaxation: type", "symmetric Gauss-Seidel");
  List.set("partitioner: overlap", OverlapBlocks);
#ifdef HAVE_IFPACK_METIS
  // use METIS to create the blocks. This requires --enable-ifpack-metis.
  // If METIS is not installed, the user may select "linear". 
  List.set("partitioner: type", "metis");
#else
  // or a simple greedy algorithm if METIS is not enabled
  List.set("partitioner: type", "greedy");
#endif
  // defines the number of local blocks here. If it is 1,
  // and only one process is used in the computation, then
  // the preconditioner must converge in one iteration.
  List.set("partitioner: local parts", 4);

  // sets the parameters
  IFPACK_CHK_ERR(Prec.SetParameters(List));

  // initialize the preconditioner. 
  IFPACK_CHK_ERR(Prec.Initialize());

  // Builds the preconditioner.
  IFPACK_CHK_ERR(Prec.Compute());

  // =================================================== //
  // E N D   O F   I F P A C K   C O N S T R U C T I O N //
  // =================================================== //

  // At this point, we need some additional objects
  // to define and solve the linear system.

  // defines LHS and RHS
  Epetra_Vector LHS(A->OperatorDomainMap());
  Epetra_Vector RHS(A->OperatorDomainMap());

  LHS.PutScalar(0.0);
  RHS.Random();

  // need an Epetra_LinearProblem to define AztecOO solver
  Epetra_LinearProblem Problem(&*A,&LHS,&RHS);

  // now we can allocate the AztecOO solver
  AztecOO Solver(Problem);

  // specify solver
  Solver.SetAztecOption(AZ_solver,AZ_cg);
  Solver.SetAztecOption(AZ_output,32);

  // HERE WE SET THE IFPACK PRECONDITIONER
  Solver.SetPrecOperator(&Prec);

  // .. and here we solve
  // NOTE: with one process, the solver must converge in
  // one iteration.
  Solver.Iterate(1550,1e-5);

#ifdef HAVE_MPI
  MPI_Finalize() ; 
#endif

  return(EXIT_SUCCESS);
}
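The explicit template stack above is one way to build an IFPACK preconditioner; the Ifpack factory (used in the last example on this page) offers a string-driven alternative. A sketch follows, using the "ILU" type that the factory example below confirms; the "fact: level-of-fill" option is an assumption about a typical ILU parameter.

#include "Ifpack.h"
#include "Ifpack_Preconditioner.h"
#include "Epetra_RowMatrix.h"
#include "Epetra_LinearProblem.h"
#include "AztecOO.h"
#include "Teuchos_RCP.hpp"
#include "Teuchos_ParameterList.hpp"

// Sketch: string-driven construction through the Ifpack factory,
// assuming A and Problem are set up as in the example above.
void SolveWithFactoryPrec(Epetra_RowMatrix& A, Epetra_LinearProblem& Problem)
{
  Ifpack Factory;
  int OverlapLevel = 2; // overlap among processes, as above
  Teuchos::RCP<Ifpack_Preconditioner> Prec =
    Teuchos::rcp(Factory.Create("ILU", &A, OverlapLevel));

  Teuchos::ParameterList List;
  List.set("fact: level-of-fill", 1); // assumed ILU option
  Prec->SetParameters(List);
  Prec->Initialize();
  Prec->Compute();

  AztecOO Solver(Problem);
  Solver.SetAztecOption(AZ_solver, AZ_cg);
  Solver.SetPrecOperator(&*Prec);
  Solver.Iterate(1550, 1e-5);
}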
Esempio n. 14
0
int main(int argc, char *argv[])
{
  int ierr = 0, i, forierr = 0;
#ifdef EPETRA_MPI

  // Initialize MPI

  MPI_Init(&argc,&argv);
  int rank; // My process ID

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );

#else

  int rank = 0;
  Epetra_SerialComm Comm;

#endif

  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;

  int verbose_int = verbose ? 1 : 0;
  Comm.Broadcast(&verbose_int, 1, 0);
  verbose = verbose_int==1 ? true : false;


  //  char tmp;
  //  if (rank==0) cout << "Press any key to continue..."<< endl;
  //  if (rank==0) cin >> tmp;
  //  Comm.Barrier();

  Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  if(verbose && MyPID==0)
    cout << Epetra_Version() << endl << endl;

  if (verbose) cout << "Processor "<<MyPID<<" of "<< NumProc
		    << " is alive."<<endl;

  // Redefine verbose to only print on PE 0
  if(verbose && rank!=0)
    verbose = false;

  int NumMyEquations = 10000;
  long long NumGlobalEquations = (NumMyEquations * NumProc) + EPETRA_MIN(NumProc,3);
  if(MyPID < 3) 
    NumMyEquations++;

  // Construct a Map that puts approximately the same Number of equations on each processor

  Epetra_Map Map(NumGlobalEquations, NumMyEquations, 0LL, Comm);
  
  // Get update list and number of local equations from newly created Map
  vector<long long> MyGlobalElements(Map.NumMyElements());
  Map.MyGlobalElements(&MyGlobalElements[0]);

  // Create an integer vector NumNz that is used to build the Petra Matrix.
  // NumNz[i] is the Number of OFF-DIAGONAL term for the ith global equation on this processor

  vector<int> NumNz(NumMyEquations);

  // We are building a tridiagonal matrix where each row has (-1 2 -1)
  // So we need 2 off-diagonal terms (except for the first and last equation)

  for(i = 0; i < NumMyEquations; i++)
    if((MyGlobalElements[i] == 0) || (MyGlobalElements[i] == NumGlobalEquations - 1))
      NumNz[i] = 1;
    else
      NumNz[i] = 2;

  // Create a Epetra_Matrix

  Epetra_CrsMatrix A(Copy, Map, &NumNz[0]);
  EPETRA_TEST_ERR(A.IndicesAreGlobal(),ierr);
  EPETRA_TEST_ERR(A.IndicesAreLocal(),ierr);
  
  // Add  rows one-at-a-time
  // Need some vectors to help
  // Off diagonal Values will always be -1


  vector<double> Values(2);
  Values[0] = -1.0;
  Values[1] = -1.0;
  vector<long long> Indices(2);
  double two = 2.0;
  int NumEntries;

  forierr = 0;
  for(i = 0; i < NumMyEquations; i++) {
    if(MyGlobalElements[i] == 0) {
      Indices[0] = 1;
      NumEntries = 1;
    }
    else if (MyGlobalElements[i] == NumGlobalEquations-1) {
      Indices[0] = NumGlobalEquations-2;
      NumEntries = 1;
    }
    else {
      Indices[0] = MyGlobalElements[i]-1;
      Indices[1] = MyGlobalElements[i]+1;
      NumEntries = 2;
    }
    forierr += !(A.InsertGlobalValues(MyGlobalElements[i], NumEntries, &Values[0], &Indices[0])==0);
    forierr += !(A.InsertGlobalValues(MyGlobalElements[i], 1, &two, &MyGlobalElements[i])>0); // Put in the diagonal entry
  }
  EPETRA_TEST_ERR(forierr,ierr);

  // Finish up
  A.FillComplete();
  A.OptimizeStorage();

  Epetra_JadMatrix JadA(A);
  Epetra_JadMatrix JadA1(A);
  Epetra_JadMatrix JadA2(A);

  // Create vectors for Power method

  Epetra_Vector q(Map);
  Epetra_Vector z(Map); z.Random();
  Epetra_Vector resid(Map);

  Epetra_Flops flopcounter;
  A.SetFlopCounter(flopcounter);
  q.SetFlopCounter(A);
  z.SetFlopCounter(A);
  resid.SetFlopCounter(A);
  JadA.SetFlopCounter(A);
  JadA1.SetFlopCounter(A);
  JadA2.SetFlopCounter(A);
  

  if (verbose) cout << "=======================================" << endl
		    << "Testing Jad using CrsMatrix as input..." << endl
		    << "=======================================" << endl;

  A.ResetFlops();
  powerMethodTests(A, JadA, Map, q, z, resid, verbose);

  // Increase diagonal dominance

  if (verbose) cout << "\n\nIncreasing the magnitude of first diagonal term and solving again\n\n"
		    << endl;

  
  if (A.MyGlobalRow(0)) {
    int numvals = A.NumGlobalEntries(0);
    vector<double> Rowvals(numvals);
    vector<long long> Rowinds(numvals);
    A.ExtractGlobalRowCopy(0, numvals, numvals, &Rowvals[0], &Rowinds[0]); // Get A[0,0]

    for (i=0; i<numvals; i++) if (Rowinds[i] == 0) Rowvals[i] *= 10.0;
    
    A.ReplaceGlobalValues(0, numvals, &Rowvals[0], &Rowinds[0]);
  }
  JadA.UpdateValues(A);
  A.ResetFlops();
  powerMethodTests(A, JadA, Map, q, z, resid, verbose);

  if (verbose) cout << "================================================================" << endl
		          << "Testing Jad using Jad matrix as input matrix for construction..." << endl
		          << "================================================================" << endl;
  JadA1.ResetFlops();
  powerMethodTests(JadA1, JadA2, Map, q, z, resid, verbose);

#ifdef EPETRA_MPI
  MPI_Finalize() ;
#endif

return ierr ;
}
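powerMethodTests() is defined elsewhere in this test; the core of what it exercises is an ordinary power iteration built from Epetra kernels. A minimal sketch, with the iteration count as an illustrative assumption:

#include "Epetra_CrsMatrix.h"
#include "Epetra_Vector.h"

// Sketch: estimate the dominant eigenvalue of A by power iteration.
// q and z are work vectors on A's row map; z should start nonzero
// (e.g. z.Random(), as in the test above).
double PowerMethod(const Epetra_CrsMatrix& A,
                   Epetra_Vector& q, Epetra_Vector& z, int niters)
{
  double lambda = 0.0, normz = 0.0;
  for (int iter = 0; iter < niters; iter++) {
    z.Norm2(&normz);         // ||z||
    q.Scale(1.0/normz, z);   // q = z / ||z||
    A.Multiply(false, q, z); // z = A q
    q.Dot(z, &lambda);       // lambda = q' A q
  }
  return lambda;
}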
Esempio n. 15
0
int main(int argc, char *argv[]) 
{
  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif
 
  bool verbose = false;

  if (argc > 1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
#ifdef HAVE_MPI
  int NumProc = Comm.NumProc();
#endif

  // define the parameters of the nonlinear PDE problem
  int nx = 5;
  int ny = 6;  
  double lambda = 1.0;

  PDEProblem Problem(nx,ny,lambda,&Comm);
 
  // starting solution, here a zero vector
  Epetra_Vector InitialGuess(Problem.GetMatrix()->Map());
  InitialGuess.PutScalar(0.0);

  // random vector upon which to apply each operator being tested
  Epetra_Vector directionVec(Problem.GetMatrix()->Map());
  directionVec.Random();

  // Set up the problem interface
  Teuchos::RCP<SimpleProblemInterface> interface = 
    Teuchos::rcp(new SimpleProblemInterface(&Problem) );
  
  // Set up the solver options parameter list
  Teuchos::RCP<Teuchos::ParameterList> noxParamsPtr = Teuchos::rcp(new Teuchos::ParameterList);
  Teuchos::ParameterList & noxParams = *(noxParamsPtr.get());

  // Set the nonlinear solver method
  noxParams.set("Nonlinear Solver", "Line Search Based");

  // Set up the printing utilities
  // Only print output if the "-v" flag is set on the command line
  Teuchos::ParameterList& printParams = noxParams.sublist("Printing");
  printParams.set("MyPID", MyPID); 
  printParams.set("Output Precision", 5);
  printParams.set("Output Processor", 0);
  if( verbose )
    printParams.set("Output Information", 
		NOX::Utils::OuterIteration + 
		NOX::Utils::OuterIterationStatusTest + 
		NOX::Utils::InnerIteration +
		NOX::Utils::Parameters + 
		NOX::Utils::Details + 
		NOX::Utils::Warning +
		NOX::Utils::TestDetails);
  else
    printParams.set("Output Information", NOX::Utils::Error +
		NOX::Utils::TestDetails);

  NOX::Utils printing(printParams);


  // Identify the test problem
  if (printing.isPrintType(NOX::Utils::TestDetails))
    printing.out() << "Starting epetra/NOX_Operators/NOX_Operators.exe" << std::endl;

  // Identify processor information
#ifdef HAVE_MPI
  if (printing.isPrintType(NOX::Utils::TestDetails)) {
    printing.out() << "Parallel Run" << std::endl;
    printing.out() << "Number of processors = " << NumProc << std::endl;
    printing.out() << "Print Process = " << MyPID << std::endl;
  }
  Comm.Barrier();
  if (printing.isPrintType(NOX::Utils::TestDetails))
    printing.out() << "Process " << MyPID << " is alive!" << std::endl;
  Comm.Barrier();
#else
  if (printing.isPrintType(NOX::Utils::TestDetails))
    printing.out() << "Serial Run" << std::endl;
#endif

  int status = 0;

  Teuchos::RCP<NOX::Epetra::Interface::Required> iReq = interface;

  // Need a NOX::Epetra::Vector for constructor
  NOX::Epetra::Vector noxInitGuess(InitialGuess, NOX::DeepCopy);

  // Analytic matrix
  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( Problem.GetMatrix(), false );

  Epetra_Vector A_resultVec(Problem.GetMatrix()->Map());
  interface->computeJacobian( InitialGuess, *A );
  A->Apply( directionVec, A_resultVec );

  // FD operator
  Teuchos::RCP<Epetra_CrsGraph> graph = Teuchos::rcp( const_cast<Epetra_CrsGraph*>(&A->Graph()), false );
  Teuchos::RCP<NOX::Epetra::FiniteDifference> FD = Teuchos::rcp(
    new NOX::Epetra::FiniteDifference(printParams, iReq, noxInitGuess, graph) );
  
  Epetra_Vector FD_resultVec(Problem.GetMatrix()->Map());
  FD->computeJacobian(InitialGuess, *FD);
  FD->Apply( directionVec, FD_resultVec );

  // Matrix-Free operator
  Teuchos::RCP<NOX::Epetra::MatrixFree> MF = Teuchos::rcp(
    new NOX::Epetra::MatrixFree(printParams, iReq, noxInitGuess) );

  Epetra_Vector MF_resultVec(Problem.GetMatrix()->Map());
  MF->computeJacobian(InitialGuess, *MF);
  MF->Apply( directionVec, MF_resultVec );

  // Need NOX::Epetra::Vectors for tests
  NOX::Epetra::Vector noxAvec ( A_resultVec , NOX::DeepCopy );
  NOX::Epetra::Vector noxFDvec( FD_resultVec, NOX::DeepCopy );
  NOX::Epetra::Vector noxMFvec( MF_resultVec, NOX::DeepCopy );

  // Create a TestCompare class
  NOX::Epetra::TestCompare tester( printing.out(), printing);
  double abstol = 1.e-4;
  double reltol = 1.e-4 ;
  //NOX::TestCompare::CompareType aComp = NOX::TestCompare::Absolute;

  status += tester.testVector( noxFDvec, noxAvec, reltol, abstol,
                              "Finite-Difference Operator Apply Test" );
  status += tester.testVector( noxMFvec, noxAvec, reltol, abstol,
                              "Matrix-Free Operator Apply Test" );

  // Summarize test results  
  if( status == 0 )
    printing.out() << "Test passed!" << std::endl;
  else 
    printing.out() << "Test failed!" << std::endl;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  // Final return value (0 = successful, non-zero = failure)
  return status;

}
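NOX::Epetra::TestCompare encapsulates the tolerance check used above. For reference, a hand-rolled equivalent for two Epetra_Vectors is sketched below; the combined relative/absolute criterion is an assumption about what testVector() checks, not its exact formula.

#include "Epetra_Vector.h"

// Sketch: pass (return 0) if ||u - v|| <= abstol + reltol * ||v||.
int CompareVectors(const Epetra_Vector& u, const Epetra_Vector& v,
                   double reltol, double abstol)
{
  Epetra_Vector diff(u);
  diff.Update(-1.0, v, 1.0); // diff = u - v
  double norm_diff = 0.0, norm_v = 0.0;
  diff.Norm2(&norm_diff);
  v.Norm2(&norm_v);
  return (norm_diff <= abstol + reltol * norm_v) ? 0 : 1;
}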
Esempio n. 16
0
// Restored MultiVector tests
int main(int argc, char *argv[]) {

  int ierr = 0, i, j;

#ifdef EPETRA_MPI

  // Initialize MPI

  MPI_Init(&argc,&argv);
  int rank; // My process ID

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);

#else

  int rank = 0;
  Epetra_SerialComm Comm;

#endif

  Comm.SetTracebackMode(0); // This should shut down any error tracing
  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;

  //  char tmp;
  //  if (rank==0) cout << "Press any key to continue..."<< endl;
  //  if (rank==0) cin >> tmp;
  //  Comm.Barrier();

  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc(); 

  if (verbose && MyPID==0)
    cout << Epetra_Version() << endl << endl;

  if (verbose) cout << Comm <<endl;

  bool verbose1 = verbose;

  // Redefine verbose to only print on PE 0
  if (verbose && rank!=0) verbose = false;

  int NumMyElements = 10000;
  int NumMyElements1 = NumMyElements; // Needed for localmap
  int NumGlobalElements = NumMyElements*NumProc+EPETRA_MIN(NumProc,3);
  if (MyPID < 3) NumMyElements++;
  int IndexBase = 0;
  int ElementSize = 7;
  int NumVectors = 4;
  
  // Test LocalMap constructor
  // and Petra-defined uniform linear distribution constructor

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_LocalMap(NumMyElements1, IndexBase, Comm)" << endl;
  if (verbose) cout << "     and Epetra_BlockMap(NumGlobalElements, ElementSize, IndexBase, Comm)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  Epetra_LocalMap *LocalMap = new Epetra_LocalMap(NumMyElements1, IndexBase,
                              Comm);
  Epetra_BlockMap * BlockMap = new Epetra_BlockMap(NumGlobalElements, ElementSize, IndexBase, Comm);
  EPETRA_TEST_ERR(MultiVectorTests(*BlockMap, NumVectors, verbose),ierr);


  EPETRA_TEST_ERR(MatrixTests(*BlockMap, *LocalMap, NumVectors, verbose),ierr);

  delete BlockMap;

  // Test User-defined linear distribution constructor

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_BlockMap(NumGlobalElements, NumMyElements, ElementSize, IndexBase, Comm)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  BlockMap = new Epetra_BlockMap(NumGlobalElements, NumMyElements, ElementSize, IndexBase, Comm);

  EPETRA_TEST_ERR(MultiVectorTests(*BlockMap, NumVectors, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*BlockMap, *LocalMap, NumVectors, verbose),ierr);

  delete BlockMap;

  // Test User-defined arbitrary distribution constructor
  // Generate Global Element List.  Do in reverse for fun!

  int * MyGlobalElements = new int[NumMyElements];
  int MaxMyGID = (Comm.MyPID()+1)*NumMyElements-1+IndexBase;
  if (Comm.MyPID()>2) MaxMyGID+=3;
  for (i = 0; i<NumMyElements; i++) MyGlobalElements[i] = MaxMyGID-i;

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_BlockMap(NumGlobalElements, NumMyElements, MyGlobalElements,  ElementSize, IndexBase, Comm)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  BlockMap = new Epetra_BlockMap(NumGlobalElements, NumMyElements, MyGlobalElements, ElementSize,
		      IndexBase, Comm);
  EPETRA_TEST_ERR(MultiVectorTests(*BlockMap, NumVectors, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*BlockMap, *LocalMap, NumVectors, verbose),ierr);

  delete BlockMap;

  int * ElementSizeList = new int[NumMyElements];
  int NumMyEquations = 0;
  int NumGlobalEquations = 0;
  for (i = 0; i<NumMyElements; i++) 
    {
      ElementSizeList[i] = i%6+2; // blocksizes go from 2 to 7
      NumMyEquations += ElementSizeList[i];
    }
  ElementSize = 7; // Set to maximum for use in checkmap
  NumGlobalEquations = Comm.NumProc()*NumMyEquations;

  // Adjust NumGlobalEquations based on processor ID
  if (Comm.NumProc() > 3)
    {
      if (Comm.MyPID()>2)
	NumGlobalEquations += 3*((NumMyElements)%6+2);
      else 
	NumGlobalEquations -= (Comm.NumProc()-3)*((NumMyElements-1)%6+2);
    }

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_BlockMap(NumGlobalElements, NumMyElements, MyGlobalElements,  ElementSizeList, IndexBase, Comm)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  BlockMap = new Epetra_BlockMap(NumGlobalElements, NumMyElements, MyGlobalElements, ElementSizeList,
		      IndexBase, Comm);
  EPETRA_TEST_ERR(MultiVectorTests(*BlockMap, NumVectors, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*BlockMap, *LocalMap, NumVectors, verbose),ierr);

  // Test Copy constructor

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_BlockMap(*BlockMap)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  Epetra_BlockMap * BlockMap1 = new Epetra_BlockMap(*BlockMap);

  EPETRA_TEST_ERR(MultiVectorTests(*BlockMap, NumVectors, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*BlockMap, *LocalMap, NumVectors, verbose),ierr);

  delete [] ElementSizeList;
  delete [] MyGlobalElements;
  delete BlockMap;
  delete BlockMap1;


  // Test Petra-defined uniform linear distribution constructor

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_Map(NumGlobalElements, IndexBase, Comm)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  Epetra_Map * Map = new Epetra_Map(NumGlobalElements, IndexBase, Comm);
  EPETRA_TEST_ERR(MultiVectorTests(*Map, NumVectors, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*Map, *LocalMap, NumVectors, verbose),ierr);

  delete Map;

  // Test User-defined linear distribution constructor

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_Map(NumGlobalElements, NumMyElements, IndexBase, Comm)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  Map = new Epetra_Map(NumGlobalElements, NumMyElements, IndexBase, Comm);

  EPETRA_TEST_ERR(MultiVectorTests(*Map, NumVectors, verbose),ierr);

  EPETRA_TEST_ERR(MatrixTests(*Map, *LocalMap, NumVectors, verbose),ierr);

  delete Map;

  // Test User-defined arbitrary distribution constructor
  // Generate Global Element List.  Do in reverse for fun!

  MyGlobalElements = new int[NumMyElements];
  MaxMyGID = (Comm.MyPID()+1)*NumMyElements-1+IndexBase;
  if (Comm.MyPID()>2) MaxMyGID+=3;
  for (i = 0; i<NumMyElements; i++) MyGlobalElements[i] = MaxMyGID-i;

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_Map(NumGlobalElements, NumMyElements, MyGlobalElements,  IndexBase, Comm)" << endl;
  if (verbose) cout << "*********************************************************" << endl;

  Map = new Epetra_Map(NumGlobalElements, NumMyElements, MyGlobalElements, 
		      IndexBase, Comm);
  EPETRA_TEST_ERR(MultiVectorTests(*Map, NumVectors, verbose),ierr);

  //EPETRA_TEST_ERR(MatrixTests(*Map, *LocalMap, NumVectors, verbose),ierr);

  // Test Copy constructor

  if (verbose) cout << "\n*********************************************************" << endl;
  if (verbose) cout << "Checking Epetra_Map(*Map)" << endl;
  if (verbose) cout << "*********************************************************" << endl;
 
  Epetra_Map Map1(*Map);

  EPETRA_TEST_ERR(MultiVectorTests(*Map, NumVectors, verbose),ierr);

  //EPETRA_TEST_ERR(MatrixTests(*Map, *LocalMap, NumVectors, verbose),ierr);

  delete [] MyGlobalElements;
  delete Map;
  delete LocalMap;

  if (verbose1)
    {
      // Test MultiVector MFLOPS for 2D Dot Product
      int M = 27;
      int N = 27;
      int K = 10000;
      Epetra_Map Map2(-1, K, IndexBase, Comm);
      Epetra_LocalMap Map3(M, IndexBase, Comm);
      
      Epetra_MultiVector A(Map2,N);A.Random();
      Epetra_MultiVector B(Map2,N);B.Random();
      Epetra_MultiVector C(Map3,N);C.Random();

      if (verbose) cout << "Testing Assignment operator" << endl;

      double tmp1 = 1.00001* (double) (MyPID+1);
      double tmp2 = tmp1;
      A[1][1] = tmp1;
      tmp2 = A[1][1];
      cout << "On PE "<< MyPID << "  A[1][1] should equal = " << tmp1;
      if (tmp1==tmp2) cout << " and it does!" << endl;
      else cout << " but it equals " << tmp2;
 
      Comm.Barrier();
	  
      if (verbose) cout << "Testing MFLOPs" << endl;
      Epetra_Flops counter;
      C.SetFlopCounter(counter);
      Epetra_Time mytimer(Comm);
      C.Multiply('T', 'N', 0.5, A, B, 0.0);
      double Multiply_time = mytimer.ElapsedTime();
      double Multiply_flops = C.Flops();
      if (verbose) cout << "\n\nTotal FLOPs = " << Multiply_flops << endl;
      if (verbose) cout << "Total Time  = " << Multiply_time << endl;
      if (verbose) cout << "MFLOPs      = " << Multiply_flops/Multiply_time/1000000.0 << endl;

      Comm.Barrier();
	  
      // Test MultiVector ostream operator with Petra-defined uniform linear distribution constructor
      // and a small vector
      
      Epetra_Map Map4(100, IndexBase, Comm);
      double * Dp = new double[200]; 
      for (j=0; j<2; j++)
	for (i=0; i<100; i++)
	  Dp[i+j*100] = i+j*100;
      Epetra_MultiVector D(View, Map4,Dp, 100, 2);
	  
      if (verbose) cout << "\n\nTesting ostream operator:  Multivector  should be 100-by-2 and print i,j indices" 
	   << endl << endl;
      cout << D << endl;

      Epetra_BlockMap Map5(-1, 25, 4, IndexBase, Comm);
      Epetra_MultiVector D1(View, Map5,Dp, 100, 2);
      if (verbose) cout << "\n\nTesting ostream operator:  Same Multivector as before except using BlockMap of 25x4" 
	   << endl << endl;
      cout << D1 << endl;

      if (verbose) cout << "Traceback Mode value = " << D.GetTracebackMode() << endl;
      delete [] Dp;
    }

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return ierr;
}
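The MFLOP measurement in the verbose1 block above follows a reusable pattern: attach an Epetra_Flops counter to the target object, time the kernel with Epetra_Time, then divide. A condensed, serial sketch of just that pattern (the map and vector sizes are illustrative):

#include "Epetra_SerialComm.h"
#include "Epetra_Map.h"
#include "Epetra_LocalMap.h"
#include "Epetra_MultiVector.h"
#include "Epetra_Flops.h"
#include "Epetra_Time.h"
#include <iostream>

int main()
{
  Epetra_SerialComm Comm;
  Epetra_Map Map(10000, 0, Comm);    // distributed rows
  Epetra_LocalMap LMap(27, 0, Comm); // replicated result map

  Epetra_MultiVector A(Map, 27);  A.Random();
  Epetra_MultiVector B(Map, 27);  B.Random();
  Epetra_MultiVector C(LMap, 27); C.Random();

  Epetra_Flops counter;       // shared flop counter
  C.SetFlopCounter(counter);  // C accumulates flops for its operations
  Epetra_Time timer(Comm);
  C.Multiply('T', 'N', 0.5, A, B, 0.0); // C = 0.5 * A^T * B

  double t = timer.ElapsedTime();
  if (t > 0.0)
    std::cout << "MFLOPs = " << C.Flops() / t / 1.0e6 << std::endl;
  return 0;
}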
Esempio n. 17
0
int main(int argc, char *argv[]) {

  int ierr = 0;

#ifdef EPETRA_MPI

  // Initialize MPI

  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);

#else

  Epetra_SerialComm Comm;

#endif

//  Comm.SetTracebackMode(0); // This should shut down any error tracing
  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;

#ifdef EPETRA_MPI
  int localverbose = verbose ? 1 : 0;
  int globalverbose=0;
  MPI_Allreduce(&localverbose, &globalverbose, 1, MPI_INT, MPI_SUM,
		MPI_COMM_WORLD);
  verbose = (globalverbose>0);
#endif

  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc(); 

  if (verbose && MyPID==0)
    cout << Epetra_Version() << endl << endl;

  if (verbose) cout << Comm <<endl;

  // Redefine verbose to only print on PE 0
  //if (verbose && rank!=0) verbose = false;

  int NumMyElements = 4;
  int NumGlobalElements = NumMyElements*NumProc;
  int IndexBase = 0;
  
  Epetra_Map Map(NumGlobalElements, NumMyElements, IndexBase, Comm);

  EPETRA_TEST_ERR( Drumm1(Map, verbose),ierr);

  EPETRA_TEST_ERR( Drumm2(Map, verbose),ierr);

  EPETRA_TEST_ERR( Drumm3(Map, verbose),ierr);


  bool preconstruct_graph = false;

  EPETRA_TEST_ERR( four_quads(Comm, preconstruct_graph, verbose), ierr);

  preconstruct_graph = true;

  EPETRA_TEST_ERR( four_quads(Comm, preconstruct_graph, verbose), ierr);

  EPETRA_TEST_ERR( submatrix_formats(Comm, verbose), ierr);

  EPETRA_TEST_ERR( rectangular(Comm, verbose), ierr);

#ifdef EPETRA_MPI
  MPI_Finalize();
#endif

  return ierr;
}
Esempio n. 18
0
int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif

  if (Comm.NumProc() == 1)
  {
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    cout << "Test `TestOverlappingRowMatrix.exe' passed!" << endl;
    exit(EXIT_SUCCESS);
  }

  Teuchos::ParameterList GaleriList;
  int nx = 100; 
  GaleriList.set("n", nx * nx);
  GaleriList.set("nx", nx);
  GaleriList.set("ny", nx);
  Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap64("Linear", Comm, GaleriList) );
  Teuchos::RefCountPtr<Epetra_CrsMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );

  int OverlapLevel = 5;
  Epetra_Time Time(Comm);

  // ======================================== //
  // Build the overlapping matrix using class //
  // Ifpack_OverlappingRowMatrix.             //
  // ======================================== //
 
  Time.ResetStartTime();
  Ifpack_OverlappingRowMatrix B(A,OverlapLevel);
  if (Comm.MyPID() == 0)
    cout << "Time to create B = " << Time.ElapsedTime() << endl;

  long long NumGlobalRowsB = B.NumGlobalRows64();
  long long NumGlobalNonzerosB = B.NumGlobalNonzeros64();

  Epetra_Vector X(A->RowMatrixRowMap());
  Epetra_Vector Y(A->RowMatrixRowMap());
  for (int i = 0 ; i < A->NumMyRows() ; ++i) 
    X[i] = 1.0* A->RowMatrixRowMap().GID64(i);
  Y.PutScalar(0.0);

  Epetra_Vector ExtX_B(B.RowMatrixRowMap());
  Epetra_Vector ExtY_B(B.RowMatrixRowMap());
  ExtY_B.PutScalar(0.0);

  IFPACK_CHK_ERR(B.ImportMultiVector(X,ExtX_B));
  IFPACK_CHK_ERR(B.Multiply(false,ExtX_B,ExtY_B));
  IFPACK_CHK_ERR(B.ExportMultiVector(ExtY_B,Y,Add));

  double Norm_B;
  Y.Norm2(&Norm_B);
  if (Comm.MyPID() == 0)
    cout << "Norm of Y using B = " << Norm_B << endl;
  
  // ================================================== //
  //Build the overlapping matrix as an Epetra_CrsMatrix //
  // ================================================== //

  Time.ResetStartTime();
  Epetra_CrsMatrix& C = 
    *(Ifpack_CreateOverlappingCrsMatrix(&*A,OverlapLevel));
  if (Comm.MyPID() == 0)
    cout << "Time to create C = " << Time.ElapsedTime() << endl;

  // simple checks on global quantities
  long long NumGlobalRowsC = C.NumGlobalRows64();
  long long NumGlobalNonzerosC = C.NumGlobalNonzeros64();
  assert (NumGlobalRowsB == NumGlobalRowsC);
  assert (NumGlobalNonzerosB == NumGlobalNonzerosC);

  Epetra_Vector ExtX_C(C.RowMatrixRowMap());
  Epetra_Vector ExtY_C(C.RowMatrixRowMap());
  ExtY_C.PutScalar(0.0);
  Y.PutScalar(0.0);

  IFPACK_CHK_ERR(C.Multiply(false,X,Y));

  double Norm_C;
  Y.Norm2(&Norm_C);
  if (Comm.MyPID() == 0)
    cout << "Norm of Y using C = " << Norm_C << endl;

  if (IFPACK_ABS(Norm_B - Norm_C) > 1e-5)
    IFPACK_CHK_ERR(-1);

  // ======================= //
  // now localize the matrix //
  // ======================= //

  Ifpack_LocalFilter D(Teuchos::rcp(&B, false));

#ifdef HAVE_MPI
  MPI_Finalize() ; 
#endif

  if (Comm.MyPID() == 0)
    cout << "Test `TestOverlappingRowMatrix.exe' passed!" << endl;

  return(EXIT_SUCCESS);
}
Esempio n. 19
0
int main(int argc, char *argv[])
{
  int total_err=0;

  // Initialize MPI

  MPI_Init(&argc,&argv);
  int rank; // My process ID

  MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );

  bool verbose = false;

  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') verbose = true;

  int verbose_int = verbose ? 1 : 0;
  Comm.Broadcast(&verbose_int, 1, 0);
  verbose = verbose_int==1 ? true : false;

  Comm.SetTracebackMode(0); // This should shut down any error traceback reporting
  int MyPID = Comm.MyPID();
  int NumProc = Comm.NumProc();

  if(verbose && MyPID==0)
    cout << Epetra_Version() << std::endl << std::endl;

  if (verbose) cout << "Processor "<<MyPID<<" of "<< NumProc
		    << " is alive."<<endl;

  // Redefine verbose to only print on PE 0
  if(verbose && rank!=0) verbose = false;

  // Matrix & Map pointers
  Epetra_CrsMatrix *A, *B, *C;
  Epetra_Map* Map1;
  Epetra_Import* Import1;
  Epetra_Export* Export1;
  double diff_tol=1e-12;

#define ENABLE_TEST_1
#define ENABLE_TEST_2
#define ENABLE_TEST_3
#define ENABLE_TEST_4
#define ENABLE_TEST_5
#define ENABLE_TEST_6

  /////////////////////////////////////////////////////////
  // Test #1: Tridiagonal Matrix; Migrate to Proc 0
  /////////////////////////////////////////////////////////
#ifdef ENABLE_TEST_1
  {
    double diff;
    build_test_matrix(Comm,1,A);
    int num_global = A->RowMap().NumGlobalElements();

    // New map with all on Proc1
    if(MyPID==0) Map1=new Epetra_Map(num_global,num_global,0,Comm);
    else         Map1=new Epetra_Map(num_global,0,0,Comm);

    // Execute fused import constructor
    Import1 = new Epetra_Import(*Map1,A->RowMap());
    B=new Epetra_CrsMatrix(*A,*Import1,0,&A->RangeMap());

    diff=test_with_matvec(*A,*B);
    if(diff > diff_tol){
      if(MyPID==0) cout<<"FusedImport: Test #1 FAILED with norm diff = "<<diff<<"."<<endl;
      total_err--;
    }

    // Execute fused export constructor
    delete B;
    Export1 = new Epetra_Export(A->RowMap(),*Map1);
    B=new Epetra_CrsMatrix(*A,*Export1,0,&A->RangeMap());

    diff=test_with_matvec(*A,*B);
    if(diff > diff_tol){
      if(MyPID==0) cout<<"FusedExport: Test #1 FAILED with norm diff = "<<diff<<"."<<endl;
      total_err--;
    }

    delete A; delete B; delete Map1; delete Import1; delete Export1;
  }
#endif


  /////////////////////////////////////////////////////////
  // Test #2: Tridiagonal Matrix; Locally Reversed Map
  /////////////////////////////////////////////////////////
#ifdef ENABLE_TEST_2
  {
    double diff;
    build_test_matrix(Comm,1,A);
    int num_local = A->RowMap().NumMyElements();

    std::vector<int> MyGIDS(num_local);
    for(int i=0; i<num_local; i++)
      MyGIDS[i] = A->RowMap().GID(num_local-i-1);

    // New map with all on Proc1
    Map1=new Epetra_Map(-1,num_local,&MyGIDS[0],0,Comm);

    // Execute fused import constructor
    Import1 = new Epetra_Import(*Map1,A->RowMap());
    B=new Epetra_CrsMatrix(*A,*Import1,0,&A->RangeMap());

    diff=test_with_matvec(*A,*B);
    if(diff > diff_tol){
      if(MyPID==0) cout<<"FusedImport: Test #2 FAILED with norm diff = "<<diff<<"."<<endl;
      total_err--;
    }

    // Execute fused export constructor
    delete B;
    Export1 = new Epetra_Export(A->RowMap(),*Map1);
    B=new Epetra_CrsMatrix(*A,*Export1,0,&A->RangeMap());

    diff=test_with_matvec(*A,*B);
    if(diff > diff_tol){
      if(MyPID==0) cout<<"FusedExport: Test #2 FAILED with norm diff = "<<diff<<"."<<endl;
      total_err--;
    }

    delete A; delete B; delete Map1; delete Import1; delete Export1;
  }
#endif

  /////////////////////////////////////////////////////////
  // Test #3: Tridiagonal Matrix; Globally Reversed Map
  /////////////////////////////////////////////////////////
#ifdef ENABLE_TEST_3
  {
    double diff;
    build_test_matrix(Comm,1,A);
    int num_local  = A->RowMap().NumMyElements();
    int num_global = A->RowMap().NumGlobalElements();
    int num_scansum = 0;

    Comm.ScanSum(&num_local,&num_scansum,1);

    // New Map
    std::vector<int> MyGIDS(num_local);
    for(int i=0; i<num_local; i++)
      MyGIDS[i] = num_global - num_scansum + num_local - i - 1;
    Map1=new Epetra_Map(-1,num_local,&MyGIDS[0],0,Comm);


    // Execute fused import constructor
    Import1 = new Epetra_Import(*Map1,A->RowMap());
    B=new Epetra_CrsMatrix(*A,*Import1,0,&A->RangeMap());

    diff=test_with_matvec(*A,*B);
    if(diff > diff_tol){
      if(MyPID==0) cout<<"FusedImport: Test #3 FAILED with norm diff = "<<diff<<"."<<endl;
      total_err--;
    }

    // Execute fused export constructor
    delete B;
    Export1 = new Epetra_Export(A->RowMap(),*Map1);
    B=new Epetra_CrsMatrix(*A,*Export1,0,&A->RangeMap());

    diff=test_with_matvec(*A,*B);
    if(diff > diff_tol){
      if(MyPID==0) cout<<"FusedExport: Test #3 FAILED with norm diff = "<<diff<<"."<<endl;
      total_err--;
    }

    delete A; delete B; delete Map1; delete Import1; delete Export1;
  }
#endif


  /////////////////////////////////////////////////////////
  // Test #4: Tridiagonal Matrix; MMM style halo import
  /////////////////////////////////////////////////////////
#ifdef ENABLE_TEST_4
  {
    double diff;
    build_test_matrix(Comm,1,A);

    // Assume we always own the diagonal
    int num_local = A->NumMyCols()-A->NumMyRows();
    std::vector<int> MyGIDS(num_local);

    for(int i=0, idx=0; i<A->NumMyCols(); i++)
      if(A->LRID(A->GCID(i)) == -1){
	MyGIDS[idx] = A->GCID(i);
	idx++;
      }

    // New map
    const int * MyGIDS_ptr = MyGIDS.size() ? &MyGIDS[0] : 0;
    Map1=new Epetra_Map(-1,num_local,MyGIDS_ptr,0,Comm);


    // Execute fused import constructor
    Import1 = new Epetra_Import(*Map1,A->RowMap());
    B=new Epetra_CrsMatrix(*A,*Import1,0,&A->RangeMap());

    // Build unfused matrix to compare
    C=new Epetra_CrsMatrix(Copy,*Map1,0);
    build_matrix_unfused(*A,*Import1,C);

    diff=test_with_matvec(*B,*C);
    if(diff > diff_tol){
      if(MyPID==0) cout<<"FusedImport: Test #4 FAILED with norm diff = "<<diff<<"."<<endl;
      total_err--;
    }

    // Execute fused export constructor
    delete B;
    Export1 = new Epetra_Export(A->RowMap(),*Map1);
    B=new Epetra_CrsMatrix(*A,*Export1,0,&A->RangeMap());

    diff=test_with_matvec(*B,*C);
    if(diff > diff_tol){
      if(MyPID==0) cout<<"FusedExport: Test #4 FAILED with norm diff = "<<diff<<"."<<endl;
      total_err--;
    }

    delete A; delete B; delete C; delete Map1; delete Import1; delete Export1;
  }
#endif


  /////////////////////////////////////////////////////////
  // Test 5: Tridiagonal Matrix; Migrate to Proc 0, Replace Maps
  /////////////////////////////////////////////////////////
#ifdef ENABLE_TEST_5
  {
    double diff;
    build_test_matrix(Comm,1,A);

    // New map with all on Procs 0 and 2
    build_test_map(A->RowMap(),Map1);

    // Execute fused import constructor
    Import1 = new Epetra_Import(*Map1,A->RowMap());
    B=new Epetra_CrsMatrix(*A,*Import1,Map1,Map1);

    diff=test_with_matvec(*A,*B);
    if(diff > diff_tol){
      if(MyPID==0) cout<<"FusedImport: Test #5 FAILED with norm diff = "<<diff<<"."<<endl;
      total_err--;
    }

    // Execute fused export constructor
    delete B;
    Export1 = new Epetra_Export(A->RowMap(),*Map1);
    B=new Epetra_CrsMatrix(*A,*Export1,Map1,Map1);

    diff=test_with_matvec(*A,*B);
    if(diff > diff_tol){
      if(MyPID==0) cout<<"FusedExport: Test #5 FAILED with norm diff = "<<diff<<"."<<endl;
      total_err--;
    }

    delete A; delete B; delete Map1; delete Import1; delete Export1;
  }
#endif


  /////////////////////////////////////////////////////////
  // Test 6: Tridiagonal Matrix; Migrate to Proc 0, Replace Comm
  /////////////////////////////////////////////////////////
#ifdef ENABLE_TEST_6
  {
    double diff;
    build_test_matrix(Comm,1,A);

    // New map with all on Procs 0 and 2
    build_test_map(A->RowMap(),Map1);

    // Execute fused import constructor
    Import1 = new Epetra_Import(*Map1,A->RowMap());
    B=new Epetra_CrsMatrix(*A,*Import1,Map1,Map1,true);

    diff=test_with_matvec_reduced_maps(*A,*B,*Map1);
    if(diff > diff_tol){
      if(MyPID==0) cout<<"FusedImport: Test #6 FAILED with norm diff = "<<diff<<"."<<endl;
      total_err--;
    }

    // Execute fused export constructor
    delete B;
    Export1 = new Epetra_Export(A->RowMap(),*Map1);
    B=new Epetra_CrsMatrix(*A,*Export1,Map1,Map1,true);

    diff=test_with_matvec_reduced_maps(*A,*B,*Map1);
    if(diff > diff_tol){
      if(MyPID==0) cout<<"FusedExport: Test #6 FAILED with norm diff = "<<diff<<"."<<endl;
      total_err--;
    }

    delete A; delete B; delete Map1; delete Import1; delete Export1;
  }
#endif


  // Final output for OK
  if(MyPID==0 && total_err==0)
    cout<<"FusedImportExport: All tests PASSED."<<endl;

  // Cleanup
  MPI_Finalize();

  return total_err ;
}
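Every test above repeats the same fused-constructor idiom; isolated, it is just the sketch below. The target map is whatever distribution you need, and the trailing arguments mirror the calls in the tests.

#include "Epetra_CrsMatrix.h"
#include "Epetra_Import.h"
#include "Epetra_Map.h"

// Sketch: migrate A to the distribution of TargetMap in one shot,
// fusing communication and matrix construction as the tests above do.
Epetra_CrsMatrix* FusedMigrate(const Epetra_CrsMatrix& A,
                               const Epetra_Map& TargetMap)
{
  Epetra_Import Importer(TargetMap, A.RowMap());
  // Fused import constructor: 0 keeps the source domain map,
  // and the range map is passed through unchanged.
  return new Epetra_CrsMatrix(A, Importer, 0, &A.RangeMap());
}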
Esempio n. 20
0
int main (int argc, char **argv)
{
#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  try 
  {
    // This is the HDF5 file manager
    EpetraExt::HDF5 HDF5(Comm);

    // creates a new file. To open an existing file, use Open("myfile.h5")
    // This file contains:
    // - a sparse (diagonal) matrix, whose group name is "speye"
    // - a multivector, whose group name is "x"
    // - a map for 2-processor run, whose group name is "map-2"

    HDF5.Open("matlab.h5");

    if (Comm.MyPID() == 0)
    {
      cout << endl;
      cout << "*) Reading Epetra_CrsMatrix from HDF5 file matlab.h5..." << endl;
      cout << endl;
    }

    // first query for matrix properties:
    int NumGlobalRows, NumGlobalCols, NumGlobalNonzeros;
    int NumGlobalDiagonals, MaxNumEntries;
    double NormOne, NormInf;

    HDF5.ReadCrsMatrixProperties("speye", NumGlobalRows, NumGlobalCols,
                                 NumGlobalNonzeros, NumGlobalDiagonals, 
                                 MaxNumEntries, NormOne, NormInf);

    if (Comm.MyPID() == 0)
    {
      cout << "Matrix information as given by ReadCrsMatrixProperties()";
      cout << endl << endl;
      cout << "NumGlobalRows = " << NumGlobalRows << endl;
      cout << "NumGlobalCols = " << NumGlobalCols << endl;
      cout << "NumGlobalNonzeros = " << NumGlobalNonzeros << endl;
      cout << "NumGlobalDiagonals = " << NumGlobalDiagonals << endl;
      cout << "MaxNumEntries = " << MaxNumEntries << endl;
      cout << "NormOne = " << NormOne << endl;
      cout << "NormInf = " << NormInf << endl;
    }

    // then read the actual matrix; a linear map is used, since no map
    // has been specified.
    Epetra_CrsMatrix* Matrix = 0;
    HDF5.Read("speye", Matrix);

    cout << *Matrix;

    if (Comm.MyPID() == 0)
    {
      cout << endl;
      cout << "*) Reading Epetra_MultiVector from HDF5 file matlab.h5..." << endl;
      cout << endl;
    }

    Epetra_MultiVector* x = 0;
    HDF5.Read("x", x);
    cout << *x;
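
    // A hedged sketch (not executed here): the same EpetraExt::HDF5 object
    // can also write objects back to a file; the group names below are
    // illustrative only.
    //
    //   HDF5.Write("speye-copy", *Matrix);  // store the matrix under a new group
    //   HDF5.Write("x-copy", *x);           // store the multivector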

    if (Comm.NumProc() == 2)
    {
      if (Comm.MyPID() == 0)
      {
        cout << endl;
        cout << "*) Reading Epetra_Map from HDF5 file matlab.h5..." << endl;
        cout << endl;
      }

      Epetra_Map* Map;
      HDF5.Read("map-2", Map);
      cout << *Map;
      delete Map;
    }

    // We finally close the file. Better to close it before calling
    // MPI_Finalize() to avoid MPI-related errors, since Close() might call MPI
    // functions.
    HDF5.Close();

    // delete memory
    if (Matrix) delete Matrix;
    if (x) delete x;
  }
  catch(EpetraExt::Exception& rhs) 
  {
    rhs.Print();
  }
  catch (...) 
  {
    cerr << "Caught generic exception" << endl;
  }

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
}
Esempio n. 21
0
int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
    MPI_Init(&argc,&argv);
    Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
    Epetra_SerialComm Comm;
#endif

    Teuchos::ParameterList GaleriList;

    // The problem is defined on a 2D grid, global size is nx * nx.
    int nx = 30;
    GaleriList.set("n", nx * nx);
    GaleriList.set("nx", nx);
    GaleriList.set("ny", nx);
    Teuchos::RefCountPtr<Epetra_Map> Map = Teuchos::rcp( Galeri::CreateMap("Linear", Comm, GaleriList) );
    Teuchos::RefCountPtr<Epetra_RowMatrix> A = Teuchos::rcp( Galeri::CreateCrsMatrix("Laplace2D", &*Map, GaleriList) );

    // =============================================================== //
    // B E G I N N I N G   O F   I F P A C K   C O N S T R U C T I O N //
    // =============================================================== //

    Teuchos::ParameterList List;

    // allocates an IFPACK factory. No data is associated
    // to this object (only method Create()).
    Ifpack Factory;

    // create the preconditioner. For valid PrecType values,
    // please check the documentation
    string PrecType = "ILU"; // incomplete LU
    int OverlapLevel = 1; // must be >= 0. If Comm.NumProc() == 1,
    // it is ignored.

    Teuchos::RefCountPtr<Ifpack_Preconditioner> Prec = Teuchos::rcp( Factory.Create(PrecType, &*A, OverlapLevel) );
    assert(Prec != Teuchos::null);

    // specify parameters for ILU
    List.set("fact: drop tolerance", 1e-9);
    List.set("fact: level-of-fill", 1);
    // the combine mode is one of the following:
    // "Add", "Zero", "Insert", "InsertAdd", "Average", "AbsMax"
    // Their meaning is as defined in file Epetra_CombineMode.h
    List.set("schwarz: combine mode", "Add");
    // sets the parameters
    IFPACK_CHK_ERR(Prec->SetParameters(List));

    // initialize the preconditioner. At this point the matrix must
    // have been FillComplete()'d, but actual values are ignored.
    IFPACK_CHK_ERR(Prec->Initialize());

    // Build the preconditioner by looking at the values of
    // the matrix.
    IFPACK_CHK_ERR(Prec->Compute());

    // =================================================== //
    // E N D   O F   I F P A C K   C O N S T R U C T I O N //
    // =================================================== //
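
    // Optional sanity checks (a hedged sketch): IsComputed() and Condest()
    // are part of the Ifpack_Preconditioner interface; Condest() returns a
    // rough estimate (or -1.0 if none has been computed).
    if (Prec->IsComputed() && Comm.MyPID() == 0)
      cout << "Condition number estimate = " << Prec->Condest() << endl;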

    // At this point, we need some additional objects
    // to define and solve the linear system.

    // define LHS and RHS
    Epetra_Vector LHS(A->OperatorDomainMap());
    Epetra_Vector RHS(A->OperatorRangeMap());

    // start from a constant solution vector
    LHS.PutScalar(1.0);
    // and build the corresponding RHS
    A->Apply(LHS,RHS);

    // now overwrite the RHS with random values; the solve below uses the
    // current LHS only as the initial guess
    RHS.Random();

    // need an Epetra_LinearProblem to define AztecOO solver
    Epetra_LinearProblem Problem(&*A,&LHS,&RHS);

    // now we can allocate the AztecOO solver
    AztecOO Solver(Problem);

    // specify solver
    Solver.SetAztecOption(AZ_solver,AZ_gmres);
    Solver.SetAztecOption(AZ_output,32);

    // HERE WE SET THE IFPACK PRECONDITIONER
    Solver.SetPrecOperator(&*Prec);

    // .. and here we solve
    Solver.Iterate(1550,1e-8);
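
    // report convergence information; NumIters() and TrueResidual() are
    // standard AztecOO accessors
    if (Comm.MyPID() == 0)
      cout << "Solver performed " << Solver.NumIters() << " iterations,"
           << " true residual = " << Solver.TrueResidual() << endl;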

    cout << *Prec;

#ifdef HAVE_MPI
    MPI_Finalize() ;
#endif

    return(EXIT_SUCCESS);
}
Esempio n. 22
0
int main(int argc, char *argv[]) {
  // Initialize MPI
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  // Create a communicator for Epetra objects
#ifdef HAVE_MPI
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif
 
  bool verbose = false;

  if (argc > 1)
    if (argv[1][0]=='-' && argv[1][1]=='v')
      verbose = true;

  // Get the process ID and the total number of processors
  int MyPID = Comm.MyPID();
#ifdef HAVE_MPI
  int NumProc = Comm.NumProc();
#endif

  // Set up the printing utilities
  Teuchos::RCP<Teuchos::ParameterList> noxParamsPtr =
    Teuchos::rcp(new Teuchos::ParameterList);
  Teuchos::ParameterList& noxParams = *(noxParamsPtr.get());
  // Only print output if the "-v" flag is set on the command line
  Teuchos::ParameterList& printParams = noxParams.sublist("Printing");
  printParams.set("MyPID", MyPID); 
  printParams.set("Output Precision", 5);
  printParams.set("Output Processor", 0);
  if( verbose )
    printParams.set("Output Information", 
		NOX::Utils::OuterIteration + 
		NOX::Utils::OuterIterationStatusTest + 
		NOX::Utils::InnerIteration +
		NOX::Utils::Parameters + 
		NOX::Utils::Details + 
		NOX::Utils::Warning +
		NOX::Utils::TestDetails);
  else
    printParams.set("Output Information", NOX::Utils::Error +
		NOX::Utils::TestDetails);

  NOX::Utils printing(printParams);

  // Identify the test problem
  if (printing.isPrintType(NOX::Utils::TestDetails))
    printing.out() << "Starting epetra/NOX_NewTest/NOX_NewTest.exe" << endl;

  // Identify processor information
#ifdef HAVE_MPI
  if (printing.isPrintType(NOX::Utils::TestDetails)) {
    printing.out() << "Parallel Run" << endl;
    printing.out() << "Number of processors = " << NumProc << endl;
    printing.out() << "Print Process = " << MyPID << endl;
  }
  Comm.Barrier();
  if (printing.isPrintType(NOX::Utils::TestDetails))
    printing.out() << "Process " << MyPID << " is alive!" << endl;
  Comm.Barrier();
#else
  if (printing.isPrintType(NOX::Utils::TestDetails))
    printing.out() << "Serial Run" << endl;
#endif

  // *** Insert your testing here! ***



  // Final return value (0 = successful, non-zero = failure)
  int status = 0;
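
  // Purely illustrative placeholder: a real test would build a NOX::Solver
  // here and set a nonzero status on failure.
  if (Comm.NumProc() < 1)
    status = 1;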

  // Summarize test results  
  if (status == 0)
    printing.out() << "Test passed!" << endl;
  else 
    printing.out() << "Test failed!" << endl;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif
  // Final return value (0 = successful, non-zero = failure)
  return status;
}
Esempio n. 23
0
int Amesos_Klu::CreateLocalMatrixAndExporters() 
{
  ResetTimer(0);

  RowMatrixA_ = Problem_->GetMatrix(); // MS, 18-Apr-06 //
  if (RowMatrixA_ == 0) AMESOS_CHK_ERR(-1);

  const Epetra_Map &OriginalMatrixMap = RowMatrixA_->RowMatrixRowMap() ;
  const Epetra_Map &OriginalDomainMap = 
    UseTranspose()?GetProblem()->GetOperator()->OperatorRangeMap():
    GetProblem()->GetOperator()->OperatorDomainMap();
  const Epetra_Map &OriginalRangeMap = 
    UseTranspose()?GetProblem()->GetOperator()->OperatorDomainMap():
    GetProblem()->GetOperator()->OperatorRangeMap();

  NumGlobalElements_ = RowMatrixA_->NumGlobalRows();
  numentries_ = RowMatrixA_->NumGlobalNonzeros();
  assert( NumGlobalElements_ == RowMatrixA_->NumGlobalCols() );

  //
  //  Create a serial matrix
  //
  assert( NumGlobalElements_ == OriginalMatrixMap.NumGlobalElements() ) ;
  int NumMyElements_ = 0 ;
  if (MyPID_==0) NumMyElements_ = NumGlobalElements_;
  //
  //  UseDataInPlace_ is set to 1 (true) only if everything is perfectly
  //  normal.  Anything out of the ordinary reverts to the more expensive
  //  path. 
  //
  UseDataInPlace_ = ( OriginalMatrixMap.NumMyElements() ==
	       OriginalMatrixMap.NumGlobalElements() )?1:0;
  if ( ! OriginalRangeMap.SameAs( OriginalMatrixMap ) ) UseDataInPlace_ = 0 ; 
  if ( ! OriginalDomainMap.SameAs( OriginalMatrixMap ) ) UseDataInPlace_ = 0 ; 
  if ( AddZeroToDiag_ ) UseDataInPlace_ = 0 ; 
  Comm().Broadcast( &UseDataInPlace_, 1, 0 ) ;
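  //  The broadcast above guarantees that every rank honors rank 0's
  //  decision, since the NumMyElements()==NumGlobalElements() test can
  //  evaluate differently on different ranks.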

  //
  //  Reindex matrix if necessary (and possible - i.e. CrsMatrix)
  //
  //  For now, since I don't know how to determine if we need to reindex the matrix, 
  //  we allow the user to control re-indexing through the "Reindex" parameter.
  //
  CrsMatrixA_ = dynamic_cast<Epetra_CrsMatrix *>(Problem_->GetOperator());

  if  ( Reindex_ ) {
    if ( CrsMatrixA_ == 0 ) AMESOS_CHK_ERR(-7);
#ifdef HAVE_AMESOS_EPETRAEXT
    const Epetra_Map& OriginalMap = CrsMatrixA_->RowMap();
    StdIndex_ = rcp( new Amesos_StandardIndex( OriginalMap  ) );
    //const Epetra_Map& OriginalColMap = CrsMatrixA_->RowMap();
    StdIndexDomain_ = rcp( new Amesos_StandardIndex( OriginalDomainMap  ) );
    StdIndexRange_ = rcp( new Amesos_StandardIndex( OriginalRangeMap  ) );

    StdIndexMatrix_ = StdIndex_->StandardizeIndex( CrsMatrixA_ );
#else
    std::cerr << "Amesos_Klu requires EpetraExt to reindex matrices." << std::endl 
	 <<  " Please rebuild with the EpetraExt library by adding --enable-epetraext to your configure invocation" << std::endl ;
    AMESOS_CHK_ERR(-8);
#endif
  } else { 
    StdIndexMatrix_ = RowMatrixA_ ;
  }

  //
  //  At this point, StdIndexMatrix_ points to a matrix with 
  //  standard indexing.  
  //

  //
  //  Convert Original Matrix to Serial (if it is not already)
  //
  if (UseDataInPlace_ == 1) {
    SerialMatrix_ = StdIndexMatrix_;
  } else {
    SerialMap_ = rcp(new Epetra_Map(NumGlobalElements_, NumMyElements_, 0, Comm()));
    
    if ( Problem_->GetRHS() ) 
      NumVectors_ = Problem_->GetRHS()->NumVectors() ; 
    else
      NumVectors_ = 1 ; 
    SerialXextract_ = rcp( new Epetra_MultiVector(*SerialMap_,NumVectors_));
    SerialBextract_ = rcp (new Epetra_MultiVector(*SerialMap_,NumVectors_));

    ImportToSerial_ = rcp(new Epetra_Import ( *SerialMap_, StdIndexMatrix_->RowMatrixRowMap() ) );

    if (ImportToSerial_.get() == 0) AMESOS_CHK_ERR(-9);

    // Build the vector data import/export once and only once
#define CHRIS
#ifdef CHRIS
    if(StdIndexMatrix_->RowMatrixRowMap().SameAs(StdIndexMatrix_->OperatorRangeMap()))
      ImportRangeToSerial_=ImportToSerial_;
    else
      ImportRangeToSerial_ = rcp(new Epetra_Import ( *SerialMap_, StdIndexMatrix_->OperatorRangeMap() ) );

    if(StdIndexMatrix_->RowMatrixRowMap().SameAs(StdIndexMatrix_->OperatorDomainMap()))
      ImportDomainToSerial_=ImportToSerial_;
    else
      ImportDomainToSerial_ = rcp(new Epetra_Import ( *SerialMap_, StdIndexMatrix_->OperatorDomainMap() ) );
#endif
    
    SerialCrsMatrixA_ = rcp( new Epetra_CrsMatrix(Copy, *SerialMap_, 0) ) ;
    SerialMatrix_ = &*SerialCrsMatrixA_ ;
  }

  MtxRedistTime_ = AddTime("Total matrix redistribution time", MtxRedistTime_, 0);

  return(0);
}
Esempio n. 24
0
int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  // define an Epetra communicator
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // check number of processes
  if (Comm.NumProc() != 1) {
    if (Comm.MyPID() == 0)
      cerr << "*ERR* This example can be run only with one process" << endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    exit(EXIT_SUCCESS);
  }
    
  // process 0 will read an HB matrix, and store it
  // in the MSR format given by the arrays bindx and val
  int N_global;
  int N_nonzeros;
  double * val = NULL;
  int * bindx = NULL;
  double * x = NULL, * b = NULL, * xexact = NULL;
  
  FILE* fp = fopen("../HBMatrices/fidap005.rua", "r");
  if (fp == 0)
  {
    cerr << "Matrix file not available" << endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    exit(EXIT_SUCCESS);
  }
  fclose(fp);
  
  Trilinos_Util_read_hb("../HBMatrices/fidap005.rua", 0,
			&N_global, &N_nonzeros, 
			&val, &bindx,
			&x, &b, &xexact);

  // assign all the elements to process 0
  // (this code can run ONLY with one process, extensions to more
  // processes will require functions to handle update of ghost nodes)
  Epetra_Map Map(N_global,0,Comm);
  
  MSRMatrix A(Map,bindx,val);
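
  // Recall Aztec's MSR layout (assumed here): for an N-row matrix, val[i]
  // holds the diagonal of row i, bindx[0] = N+1, and the off-diagonal values
  // of row i sit in val[bindx[i]..bindx[i+1]-1], with their column indices
  // in the same positions of bindx.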
  
  // define two vectors
  Epetra_Vector xxx(Map);
  Epetra_Vector yyy(Map);

  xxx.Random();

  A.Apply(xxx,yyy);

  cout << yyy;
  
  double norm2;
  yyy.Norm2(&norm2);

  cout << norm2 << endl;

  // free memory allocated by Trilinos_Util_read_hb
  if (val != NULL) free((void*)val);
  if (bindx != NULL) free((void*)bindx);
  if (x != NULL) free((void*)x);
  if (b != NULL) free((void*)b);
  if (xexact != NULL) free((void*)xexact);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
  
} /* main */
Esempio n. 25
0
int main(int argc, char *argv[]) {
    
#ifdef HAVE_MPI
  // Initialize MPI and setup an Epetra communicator
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  // If we aren't using MPI, then setup a serial communicator.
  Epetra_SerialComm Comm;
#endif

  // Some typedefs for oft-used data types
  typedef Epetra_MultiVector MV;
  typedef Epetra_Operator OP;
  typedef Belos::MultiVecTraits<double, Epetra_MultiVector> MVT;

  bool ierr;

  // Get our processor ID (0 if running serial)
  int MyPID = Comm.MyPID();

  // Verbose flag: only processor 0 should print
  bool verbose = (MyPID==0);

  // Initialize a Gallery object, from which we will select a matrix
  // Select a 2-D laplacian, of order 100
  Trilinos_Util::CrsMatrixGallery Gallery("laplace_2d", Comm);
  Gallery.Set("problem_size", 100);

  // Say hello and print some information about the gallery matrix
  if (verbose) {
    cout << "Belos Example: Block CG" << endl;
    cout << "Problem info:" << endl;
    cout << Gallery;
    cout << endl;
  }

  // Setup some more problem/solver parameters:

  // Block size
  int blocksize = 4;

  // Get a pointer to the system matrix, inside of a Teuchos::RCP
  // The Teuchos::RCP is a reference counting pointer that handles
  // garbage collection for us, so that we can perform memory allocation without
  // having to worry about freeing memory manually.
  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( Gallery.GetMatrix(), false );

  // Create a Belos MultiVector, based on an Epetra MultiVector
  const Epetra_Map * Map = &(A->RowMap());
  Teuchos::RCP<Epetra_MultiVector> B = 
    Teuchos::rcp( new Epetra_MultiVector(*Map,blocksize) );
  Teuchos::RCP<Epetra_MultiVector> X = 
    Teuchos::rcp( new Epetra_MultiVector(*Map,blocksize) );

  // Initialize the solution with zero and right-hand side with random entries
  X->PutScalar( 0.0 );
  B->Random();

  // Setup the linear problem, with the matrix A and the vectors X and B
  Teuchos::RCP< Belos::LinearProblem<double,MV,OP> > myProblem = 
    Teuchos::rcp( new Belos::LinearProblem<double,MV,OP>(A, X, B) );

  // The 2-D laplacian is symmetric. Specify this in the linear problem.
  myProblem->setHermitian();

  // Signal that we are done setting up the linear problem
  ierr = myProblem->setProblem();

  // Check the return from setProblem(). If this is false, there was an
  // error. This probably means we did not specify enough information for
  // the linear problem.
  assert(ierr == true);

  // Specify the verbosity level. Options include:
  // Belos::Errors 
  //   This option is always set
  // Belos::Warnings 
  //   Warnings (less severe than errors)
  // Belos::IterationDetails 
  //   Details at each iteration, such as the current residual norms
  // Belos::OrthoDetails 
  //   Details about orthogonality
  // Belos::TimingDetails
  //   A summary of the timing info for the solve() routine
  // Belos::FinalSummary 
  //   A final summary 
  // Belos::Debug 
  //   Debugging information
  int verbosity = Belos::Warnings + Belos::Errors + Belos::FinalSummary + Belos::TimingDetails;

  // Create the parameter list for the linear solver
  Teuchos::RCP<Teuchos::ParameterList> myPL = Teuchos::rcp( new Teuchos::ParameterList() );
  myPL->set( "Verbosity", verbosity );
  myPL->set( "Block Size", blocksize );
  myPL->set( "Maximum Iterations", 100 );
  myPL->set( "Convergence Tolerance", 1.0e-8 );

  // Create the Block CG solver
  // This takes as inputs the linear problem and the solver parameters
  Belos::BlockCGSolMgr<double,MV,OP> mySolver(myProblem, myPL);

  // Solve the linear problem, and save the return code
  Belos::ReturnType solverRet = mySolver.solve();

  // Check return code of the solver: Unconverged, Failed, or OK
  switch (solverRet) {

  // UNCONVERGED
  case Belos::Unconverged:
    if (verbose) 
      cout << "Belos::BlockCGSolMgr::solve() did not converge!" << endl;
#ifdef HAVE_MPI
    MPI_Finalize();
#endif
    return 0;
    break;

  // CONVERGED
  case Belos::Converged:
    if (verbose) 
      cout << "Belos::BlockCGSolMgr::solve() converged!" << endl;
    break;
  }

  // Test residuals
  Epetra_MultiVector R( B->Map(), blocksize );

  // R = A*X
  A->Apply( *X, R );

  // R -= B 
  MVT::MvAddMv( -1.0, *B, 1.0, R, R );

  // Compute the 2-norm of each vector in the MultiVector
  // and store them to a std::vector<double>
  std::vector<double> normR(blocksize), normB(blocksize);
  MVT::MvNorm( R, normR );
  MVT::MvNorm( *B, normB );

  // Output results to screen
  if(verbose) {
    cout << scientific << setprecision(6) << showpoint;
    cout << "******************************************************\n"
         << "           Results (outside of linear solver)           \n" 
         << "------------------------------------------------------\n"
         << "  Linear System\t\tRelative Residual\n"
         << "------------------------------------------------------\n";
    for( int i=0 ; i<blocksize ; ++i ) {
      cout << "  " << i+1 << "\t\t\t" << normR[i]/normB[i] << endl;
    }
    cout << "******************************************************\n" << endl;
  }


#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
}
Esempio n. 26
0
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // set global dimension to 5, could be any number
  int NumGlobalElements = 5;
  
  // create a linear map
  Epetra_Map Map(NumGlobalElements,0,Comm);
  
  // local number of rows
  int NumMyElements = Map.NumMyElements();
  // get update list
  int * MyGlobalElements = Map.MyGlobalElements( );

  // dimension of each block
  Epetra_IntSerialDenseVector ElementSizeList(NumMyElements);

  // now construct a funky matrix. Diagonal block of block row i will have
  // dimension i+1 (don't run this code with too many nodes...). The
  // dimension of each block row is recorded in ElementSizeList.
  // Here ElementSizeList is declared as Epetra_IntSerialDenseVector,
  // but an int array is fine as well.
  // max_blk keeps track of the max block dimension
  
  int max_blk = 0;
  
  for( int i=0 ; i<NumMyElements ; ++i ) {
    ElementSizeList[i] = 1+MyGlobalElements[i];
    if( ElementSizeList[i] > max_blk ) max_blk =  ElementSizeList[i];
  }
  
  // create a block map based on the already declared point map
  // (used to determine NumMyElements and MyGlobalElements).
  // The same point map can be used for more block maps,
  // just change the input value of ElementSizeList
  Epetra_BlockMap BlockMap(NumGlobalElements,NumMyElements,
			   MyGlobalElements, 
			   ElementSizeList.Values(),0,Comm);

  // create a VBR matrix based on BlockMap
  Epetra_VbrMatrix A(Copy, BlockMap,2);

  int MaxBlockSize = max_blk * max_blk*100;
  
  int Indices[2];
  double* Values; Values = new double[MaxBlockSize];

  // cycle over all the local rows. 
  
  for( int i=0 ; i<NumMyElements ; ++i ) {
    
    // get GID of local row
    int GlobalNode = MyGlobalElements[i];
    // all rows but the last one will have two nonzero block-elements
    Indices[0] = GlobalNode;
    int NumEntries = 1;
    if( GlobalNode != NumGlobalElements-1 ) {
      Indices[1] = GlobalNode+1;
      NumEntries++;
    }

    // with VBR matrices, we have to insert one block at a time.
    // This requires two additional calls, one to start the
    // process (BeginInsertGlobalValues), and another one to
    // commit the end of submissions (EndSubmitEntries).
    
    A.BeginInsertGlobalValues(GlobalNode, NumEntries, Indices);
    // insert diagonal
    int BlockRows = ElementSizeList[i];
    for( int k=0 ; k<BlockRows * BlockRows ; ++k )
      Values[k] = 1.0*i;
    A.SubmitBlockEntry(Values,BlockRows,BlockRows,BlockRows);

    // insert off diagonal if any
    if( GlobalNode != NumGlobalElements-1 ) {
      int BlockCols = BlockRows+1;
      for( int k=0 ; k<BlockRows * BlockCols ; ++k )
	Values[k] = 1.0*i;
      A.SubmitBlockEntry(Values,BlockRows,BlockRows,BlockCols);
    }
    
    A.EndSubmitEntries();
  }

  A.FillComplete();

  cout << A;
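
  // A quick smoke test of the assembled VBR matrix (a hedged sketch):
  // point vectors built on BlockMap carry one entry per block-row component.
  Epetra_Vector xvec(BlockMap), yvec(BlockMap);
  xvec.PutScalar(1.0);
  A.Multiply(false, xvec, yvec); // yvec = A * xvec
  cout << yvec;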

  delete[] Values;

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(0);

}
Esempio n. 27
0
//=============================================================================
int Amesos_Mumps::SymbolicFactorization()
{

  // erase data if present. 
  if (IsSymbolicFactorizationOK_ && MDS.job != -777)
   Destroy();

  IsSymbolicFactorizationOK_ = false;
  IsNumericFactorizationOK_ = false;

  CreateTimer(Comm());
  
  CheckParameters();
  AMESOS_CHK_ERR(ConvertToTriplet(false));

#if defined(HAVE_MPI) && defined(HAVE_AMESOS_MPI_C2F)
  if (MaxProcs_ != Comm().NumProc()) 
  {
    if(MUMPSComm_) 
      MPI_Comm_free(&MUMPSComm_);

    std::vector<int> ProcsInGroup(MaxProcs_);
    for (int i = 0 ; i < MaxProcs_ ; ++i) 
      ProcsInGroup[i] = i;

    MPI_Group OrigGroup, MumpsGroup;
    MPI_Comm_group(MPI_COMM_WORLD, &OrigGroup);
    MPI_Group_incl(OrigGroup, MaxProcs_, &ProcsInGroup[0], &MumpsGroup);
    MPI_Comm_create(MPI_COMM_WORLD, MumpsGroup, &MUMPSComm_);

#ifdef MUMPS_4_9
    MDS.comm_fortran = (MUMPS_INT) MPI_Comm_c2f( MUMPSComm_);
#else

#ifndef HAVE_AMESOS_OLD_MUMPS
    MDS.comm_fortran = (DMUMPS_INT) MPI_Comm_c2f( MUMPSComm_);
#else
    MDS.comm_fortran = (F_INT) MPI_Comm_c2f( MUMPSComm_);
#endif

#endif

  } 
  else 
  {
    const Epetra_MpiComm* MpiComm = dynamic_cast<const Epetra_MpiComm*>(&Comm());
    assert (MpiComm != 0);
#ifdef MUMPS_4_9
    MDS.comm_fortran = (MUMPS_INT) MPI_Comm_c2f(MpiComm->GetMpiComm());
#else

#ifndef HAVE_AMESOS_OLD_MUMPS
    MDS.comm_fortran = (DMUMPS_INT) MPI_Comm_c2f(MpiComm->GetMpiComm());
#else
    MDS.comm_fortran = (F_INT) MPI_Comm_c2f(MpiComm->GetMpiComm());
#endif

#endif
  }
#else
  // These next three lines of code were required to make Amesos_Mumps work
  // with Ifpack_SubdomainFilter. They are useful in all cases
  // when using MUMPS on a subdomain.
  const Epetra_MpiComm* MpiComm = dynamic_cast<const Epetra_MpiComm*>(&Comm());
  assert (MpiComm != 0);
  MDS.comm_fortran = (MUMPS_INT) MPI_Comm_c2f(MpiComm->GetMpiComm());
  // Previously, the next line was uncommented: -987654 tells MUMPS to use
  // MPI_COMM_WORLD, which also works in serial. But we don't want MUMPS to
  // work on the global MPI comm, only on the comm associated with the matrix.
  //  MDS.comm_fortran = -987654;
#endif
  
  MDS.job = -1  ;     //  Initialization
  MDS.par = 1 ;       //  Host IS involved in computations
//  MDS.sym = MatrixProperty_;
  MDS.sym =  0;       //  MatrixProperty_ is uninitialized.  Furthermore MUMPS 
                      //  expects only half of the matrix to be provided for
                      //  symmetric matrices.  Hence setting MDS.sym to be non-zero
                      //  indicating that the matrix is symmetric will only work
                      //  if we change ConvertToTriplet to pass only half of the 
                      //  matrix.  Bug #2331 and Bug #2332 - low priority


  RedistrMatrix(true);

  if (Comm().MyPID() < MaxProcs_) 
  {
    dmumps_c(&(MDS));   //  Initialize MUMPS
    static_cast<void>( CheckError( ) );  
  }

  MDS.n = Matrix().NumGlobalRows();

  // fix pointers for nonzero pattern of A. Numerical values
  // will be entered in PerformNumericalFactorization()
  if (Comm().NumProc() != 1) 
  {
    MDS.nz_loc = RedistrMatrix().NumMyNonzeros();

    if (Comm().MyPID() < MaxProcs_) 
    {
      MDS.irn_loc = &Row[0]; 
      MDS.jcn_loc = &Col[0];
    }
  } 
  else 
  {
    if (Comm().MyPID() == 0) 
    {
      MDS.nz = Matrix().NumMyNonzeros();
      MDS.irn = &Row[0]; 
      MDS.jcn = &Col[0]; 
    }
  }

  // scaling if provided by the user
  if (RowSca_ != 0) 
  {
    MDS.rowsca = RowSca_;
    MDS.colsca = ColSca_;
  }

  // given ordering if provided by the user
  if (PermIn_ != 0) 
  {
    MDS.perm_in = PermIn_;
  }

  MDS.job = 1;     // Request symbolic factorization

  SetICNTLandCNTL();

  // Perform symbolic factorization

  ResetTimer();

  if (Comm().MyPID() < MaxProcs_) 
    dmumps_c(&(MDS));

  SymFactTime_ = AddTime("Total symbolic factorization time", SymFactTime_);

  int IntWrong = CheckError()?1:0 ; 
  int AnyWrong;
  Comm().SumAll( &IntWrong, &AnyWrong, 1 ) ; 
  bool Wrong = AnyWrong > 0 ; 


  if ( Wrong ) {
      AMESOS_CHK_ERR( StructurallySingularMatrixError ) ; 
  }

  IsSymbolicFactorizationOK_ = true ;
  NumSymbolicFact_++;  

  return 0;
}
Esempio n. 28
0
int main(int argc, char *argv[]) {
  int i, j, info;
  const double one = 1.0;
  const double zero = 0.0;
  Teuchos::LAPACK<int,double> lapack;

#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  int MyPID = Comm.MyPID();

  //  Dimension of the matrix
  int m = 500;
  int n = 100;

  // Construct a Map that puts approximately the same number of
  // equations on each processor.

  Epetra_Map RowMap(m, 0, Comm);
  Epetra_Map ColMap(n, 0, Comm);

  // Get update list and number of local equations from newly created Map.

  int NumMyRowElements = RowMap.NumMyElements();
  
  std::vector<int> MyGlobalRowElements(NumMyRowElements);
  RowMap.MyGlobalElements(&MyGlobalRowElements[0]);

  /* We are building an m by n matrix with entries

              A(i,j) = k*(si)*(tj - 1) if i <= j
                     = k*(tj)*(si - 1) if i  > j

     where si = i/(m+1), tj = j/(n+1), and k = 1/(n+1).
  */

  // Create an Epetra_Matrix
  Teuchos::RCP<Epetra_CrsMatrix> A = Teuchos::rcp( new Epetra_CrsMatrix(Copy, RowMap, n) );

  // Compute coefficients for discrete integral operator
  std::vector<double> Values(n);
  std::vector<int> Indices(n);
  double inv_mp1 = one/(m+1);
  double inv_np1 = one/(n+1);
  for (i=0; i<n; i++) { Indices[i] = i; }
  
  for (i=0; i<NumMyRowElements; i++) {
    //
    for (j=0; j<n; j++) {
      //
      if ( MyGlobalRowElements[i] <= j ) {
        Values[j] = inv_np1 * ( (MyGlobalRowElements[i]+one)*inv_mp1 ) * ( (j+one)*inv_np1 - one );  // k*(si)*(tj-1)
      }
      else {
        Values[j] = inv_np1 * ( (j+one)*inv_np1 ) * ( (MyGlobalRowElements[i]+one)*inv_mp1 - one );  // k*(tj)*(si-1)
      }
    }
    info = A->InsertGlobalValues(MyGlobalRowElements[i], n, &Values[0], &Indices[0]);
    assert( info==0 );
  }

  // Finish up
  info = A->FillComplete(ColMap, RowMap);
  assert( info==0 );
  info = A->OptimizeStorage();
  assert( info==0 );
  A->SetTracebackMode(1); // Shutdown Epetra Warning tracebacks

  //************************************
  // Start the block Arnoldi iteration
  //***********************************
  //
  //  Variables used for the Block Arnoldi Method
  //
  int nev = 4;
  int blockSize = 1;
  int numBlocks = 10;
  int maxRestarts = 20;
  int verbosity = Anasazi::Errors + Anasazi::Warnings + Anasazi::FinalSummary;
  double tol = lapack.LAMCH('E');
  std::string which = "LM";
  //
  // Create parameter list to pass into solver
  //
  Teuchos::ParameterList MyPL;
  MyPL.set( "Verbosity", verbosity );
  MyPL.set( "Which", which );
  MyPL.set( "Block Size", blockSize );
  MyPL.set( "Num Blocks", numBlocks );
  MyPL.set( "Maximum Restarts", maxRestarts );
  MyPL.set( "Convergence Tolerance", tol );

  typedef Anasazi::MultiVec<double> MV;
  typedef Anasazi::Operator<double> OP;

  // Create an Anasazi::EpetraMultiVec for an initial vector to start the solver. 
  // Note:  This needs to have the same number of columns as the blocksize.
  Teuchos::RCP<Anasazi::EpetraMultiVec> ivec = Teuchos::rcp( new Anasazi::EpetraMultiVec(ColMap, blockSize) );
  ivec->MvRandom();

  // Call the constructor for the (A^T*A) operator
  Teuchos::RCP<Anasazi::EpetraSymOp>  Amat = Teuchos::rcp( new Anasazi::EpetraSymOp(A) );  
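  // Note: EpetraSymOp applies the composite operator A^T*A, whose
  // eigenvalues are the squared singular values of A; this is why square
  // roots of the computed eigenvalues are taken further below.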
  Teuchos::RCP<Anasazi::BasicEigenproblem<double, MV, OP> > MyProblem =
    Teuchos::rcp( new Anasazi::BasicEigenproblem<double, MV, OP>(Amat, ivec) );

  // Inform the eigenproblem that the matrix A is symmetric
  MyProblem->setHermitian(true);

  // Set the number of eigenvalues requested and the blocksize the solver should use
  MyProblem->setNEV( nev );

  // Inform the eigenproblem that you are finished passing it information
  bool boolret = MyProblem->setProblem();
  if (boolret != true) {
    if (MyPID == 0) {
      cout << "Anasazi::BasicEigenproblem::setProblem() returned with error." << endl;
    }
#ifdef HAVE_MPI
    MPI_Finalize() ;
#endif
    return -1;
  }

  // Initialize the Block Arnoldi solver
  Anasazi::BlockKrylovSchurSolMgr<double, MV, OP> MySolverMgr(MyProblem, MyPL);
  
  // Solve the problem to the specified tolerances or length
  Anasazi::ReturnType returnCode = MySolverMgr.solve();
  if (returnCode != Anasazi::Converged && MyPID==0) {
    cout << "Anasazi::EigensolverMgr::solve() returned unconverged." << endl;
  }

  // Get the eigenvalues and eigenvectors from the eigenproblem
  Anasazi::Eigensolution<double,MV> sol = MyProblem->getSolution();
  std::vector<Anasazi::Value<double> > evals = sol.Evals;
  int numev = sol.numVecs;

  if (numev > 0) {
    
    // Compute singular values/vectors and direct residuals.
    //
    // Compute singular values which are the square root of the eigenvalues
    if (MyPID==0) {
      cout<<"------------------------------------------------------"<<endl;
      cout<<"Computed Singular Values: "<<endl;
      cout<<"------------------------------------------------------"<<endl;
    }
    for (i=0; i<numev; i++) { evals[i].realpart = Teuchos::ScalarTraits<double>::squareroot( evals[i].realpart ); }
    //
    // Compute left singular vectors :  u = Av/sigma
    //
    std::vector<double> tempnrm(numev), directnrm(numev);
    std::vector<int> index(numev);
    for (i=0; i<numev; i++) { index[i] = i; }
    Anasazi::EpetraMultiVec Av(RowMap,numev), u(RowMap,numev);
    Anasazi::EpetraMultiVec* evecs = dynamic_cast<Anasazi::EpetraMultiVec* >(sol.Evecs->CloneViewNonConst( index ));
    Teuchos::SerialDenseMatrix<int,double> S(numev,numev);
    A->Apply( *evecs, Av );
    Av.MvNorm( tempnrm );
    for (i=0; i<numev; i++) { S(i,i) = one/tempnrm[i]; };
    u.MvTimesMatAddMv( one, Av, S, zero );
    //
    // Compute direct residuals : || Av - sigma*u ||
    //
    for (i=0; i<numev; i++) { S(i,i) = evals[i].realpart; }
    Av.MvTimesMatAddMv( -one, u, S, one );
    Av.MvNorm( directnrm );
    if (MyPID==0) {
      cout.setf(std::ios_base::right, std::ios_base::adjustfield);
      cout<<std::setw(16)<<"Singular Value"
        <<std::setw(20)<<"Direct Residual"
        <<endl;
      cout<<"------------------------------------------------------"<<endl;
      for (i=0; i<numev; i++) {
        cout<<std::setw(16)<<evals[i].realpart
          <<std::setw(20)<<directnrm[i] 
          <<endl;
      }  
      cout<<"------------------------------------------------------"<<endl;
    }
  }
  
#ifdef EPETRA_MPI
    MPI_Finalize() ;
#endif
  
  return 0;
}
Esempio n. 29
0
int main(int argc, char *argv[]) {
#ifdef EPETRA_MPI
  // Initialize MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm( MPI_COMM_WORLD );
#else
  Epetra_SerialComm Comm;
#endif
  bool verbose = false;  // used to set verbose false on non-root processors
  bool verbose1 = false; // user's command-line argument
  // Check if we should print results to standard out
  if (argc>1) if (argv[1][0]=='-' && argv[1][1]=='v') {
    verbose = true;
    if (Comm.MyPID()==0) verbose1 = true;
  }
  
  if (verbose1)
    cout << AztecOO_Version() << endl << endl;
  if (verbose)
    cout << Comm <<endl;
  
  long long NumGlobalElements = 5;
  long long IndexBase = 0;

  Epetra_Map Map(NumGlobalElements, IndexBase, Comm);
  int NumMyElements = Map.NumMyElements();
  long long * MyGlobalElements = Map.MyGlobalElements64();

  Epetra_CrsMatrix A(Copy,Map,0);
  Epetra_Vector b(Map);
  Epetra_Vector x(Map);
  Epetra_Vector xx(Map);

  if (verbose)
    cout << "Proc = " << Comm.MyPID() << "  NumMyElements=" << NumMyElements << endl;
    
  for (int i=0; i<NumMyElements; i++) {
    long long index = MyGlobalElements[i]; // Global Diagonal location
    double value =  -pow(((double)10),((double) index));
    if (index==0) 
      value = 1.0; // First value will be positive 1, remainder will be negative powers of 10
    b[i] = value;  // RHS has same value as diagonal
    xx[i] = 1.0;   // Makes solution all ones
    x[i] = 0.0;     // Start with zero guess.
    A.InsertGlobalValues(index, 1, &value, &index);
  }

  A.FillComplete();	// Signal that data entries are complete.
   
  if (verbose1) cout << "A = " << endl;
  if (verbose) cout << A << endl;
  if (verbose1) cout << "xx = " << endl;
  if (verbose) cout << xx << endl;
  if (verbose1) cout << "x = " << endl;
  if (verbose) cout << x << endl;
  if (verbose1) cout << "b = " << endl;
  if (verbose) cout << b << endl;

  // Want to solve Ax=b
  Epetra_LinearProblem problem(&A, &x, &b);

  AztecOO solver(problem);

  solver.SetAztecOption(AZ_scaling, AZ_none);  // We want pure GMRES
  solver.SetAztecOption(AZ_precond, AZ_none);
  solver.SetAztecOption(AZ_solver, AZ_gmres);
  solver.SetAztecOption(AZ_max_iter, NumGlobalElements); // Set equal to global dimension
  solver.SetAztecOption(AZ_kspace, NumGlobalElements);
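  // With AZ_kspace equal to the global dimension, GMRES never restarts, so
  // in exact arithmetic it must terminate in at most NumGlobalElements
  // iterations; the assert on NumIters() below relies on this.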
  solver.SetAztecOption(AZ_diagnostics, AZ_none);
  if (!verbose)
      solver.SetAztecOption(AZ_output, AZ_none);

  double single_error = 0.0;
  double double_error = 0.0;
  for (int i=0; i<5; i++) {
    
    if (i==0) {
      solver.SetAztecOption(AZ_orthog, AZ_single_classic);
      solver.Iterate(NumGlobalElements, 1.0E-14);
      single_error = solver.RecursiveResidual();
      
    }
    else if (i==1)  {
      solver.SetAztecOption(AZ_orthog, AZ_double_classic);
      solver.Iterate(NumGlobalElements, 1.0E-14);
      double_error = solver.RecursiveResidual();
      assert(double_error < single_error); // Error from double classic should be less than single
    }
    else if (i==2)  {
      solver.SetAztecOption(AZ_orthog, AZ_single_modified);
      solver.Iterate(NumGlobalElements, 1.0E-14);
      single_error = solver.RecursiveResidual();
    }
    else if (i==3)  {
      solver.SetAztecOption(AZ_orthog, AZ_double_modified);
      solver.Iterate(NumGlobalElements, 1.0E-14);
      double_error = solver.RecursiveResidual();
      assert(double_error < single_error); // Error from double modified should be less than single
    }
    else if (i==4)  {
      solver.SetAztecOption(AZ_solver, AZ_bicgstab);
      solver.Iterate(NumGlobalElements, 1.0E-14);
      assert(solver.RecursiveResidual()>single_error); // BiCGSTAB should always be worse than any GMRES answer
    }

    if (verbose1)
      cout << "Solver performed " << solver.NumIters() << " iterations." << endl
	   << "Norm of Recursive residual = " << solver.RecursiveResidual() << endl << endl;

    assert(solver.NumIters()==NumGlobalElements);
      

  // Print out the result
    if (verbose1) cout << "Computed Solution = " << endl;
    if (verbose) cout << x << endl;

    x.PutScalar(0.0);    
  }

  if (verbose1)
    cout << "Computed solution for 5x5 matrix diag(1, -10, -100, -1000, -10000) using the following unpreconditioned unscaled methods:" << endl
	 << "  1) GMRES with single step classical Gram-Schmidt" << endl
	 << "  2) GMRES with double step classical Gram-Schmidt" << endl
	 << "  3) GMRES with single step modified Gram-Schmidt" << endl
	 << "  4) GMRES with double step modified Gram-Schmidt" << endl
	 << "  5) BiCGSTAB" << endl << endl
	 << "Confirmed that double steps provide superior answer to single step" << endl
	 << "and that GMRES is superior to BiCGSTAB" << endl << endl;
  if (Comm.MyPID()==0)
    cout << "All tests passed." << endl;
    
#ifdef EPETRA_MPI
  MPI_Finalize() ;
#endif
  return 0;
}
Esempio n. 30
0
void RieszRep::distributeDofs(){
  int myRank = Teuchos::GlobalMPISession::getRank();
  int numRanks = Teuchos::GlobalMPISession::getNProc();
#ifdef HAVE_MPI
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
  //cout << "rank: " << rank << " of " << numProcs << endl;
#else
  Epetra_SerialComm Comm;
#endif  

  // The code below could stand to be reworked; I'm pretty sure this is not
  // the best way to distribute the data, and it would also be best to get
  // rid of the iteration over the global set of active elements.  But a
  // similar point could be made about this method as a whole: do we really
  // need to distribute all the dofs to every rank?  It may be best to
  // eliminate this method altogether.
  
  vector<GlobalIndexType> cellIDsByPartitionOrdering;
  for (int rank=0; rank<numRanks; rank++) {
    set<GlobalIndexType> cellIDsForRank = _mesh->globalDofAssignment()->cellsInPartition(rank);
    cellIDsByPartitionOrdering.insert(cellIDsByPartitionOrdering.end(), cellIDsForRank.begin(), cellIDsForRank.end());
  }
  // determine inverse map:
  map<GlobalIndexType,int> ordinalForCellID;
  for (int ordinal=0; ordinal<cellIDsByPartitionOrdering.size(); ordinal++) {
    GlobalIndexType cellID = cellIDsByPartitionOrdering[ordinal];
    ordinalForCellID[cellID] = ordinal;
//    cout << "ordinalForCellID[" << cellID << "] = " << ordinal << endl;
  }
  
  for (int cellOrdinal=0; cellOrdinal<cellIDsByPartitionOrdering.size(); cellOrdinal++) {
    GlobalIndexType cellID = cellIDsByPartitionOrdering[cellOrdinal];
    ElementTypePtr elemTypePtr = _mesh->getElementType(cellID);
    DofOrderingPtr testOrderingPtr = elemTypePtr->testOrderPtr;
    int numDofs = testOrderingPtr->totalDofs();
    
    int cellIDPartition = _mesh->partitionForCellID(cellID);
    bool isInPartition = (cellIDPartition == myRank);

    int numMyDofs;
    FieldContainer<double> dofs(numDofs);
    if (isInPartition){  // if in partition
      numMyDofs = numDofs;
      dofs = _rieszRepDofs[cellID];
    } else{
      numMyDofs = 0;
    }
    
    Epetra_Map dofMap(numDofs,numMyDofs,0,Comm); 
    Epetra_Vector distributedRieszDofs(dofMap);
    if (isInPartition) {
      for (int i = 0;i<numMyDofs;i++) { // shouldn't activate on off-proc partitions
        distributedRieszDofs.ReplaceGlobalValues(1,&dofs(i),&i);
      }     
    }
    Epetra_Map importMap(numDofs,numDofs,0,Comm); // every proc should own their own copy of the dofs
    Epetra_Import testDofImporter(importMap, dofMap); 
    Epetra_Vector globalRieszDofs(importMap);
    globalRieszDofs.Import(distributedRieszDofs, testDofImporter, Insert);  
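    // the Import above replicates the owning rank's values onto every rank:
    // dofMap gives the owner all numDofs entries and everyone else none,
    // while importMap gives every rank its own full copy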
    if (!isInPartition){
      for (int i = 0;i<numDofs;i++){
        dofs(i) = globalRieszDofs[i];
      }      
    }
    _rieszRepDofsGlobal[cellID] = dofs;
//    { // debugging
//      ostringstream cellIDlabel;
//      cellIDlabel << "cell " << cellID << " _rieszRepDofsGlobal, after global import";
//      TestSuite::serializeOutput(cellIDlabel.str(), _rieszRepDofsGlobal[cellID]);
//    }
  }
  
  // distribute norms as well
  GlobalIndexType numElems = _mesh->numActiveElements();
  set<GlobalIndexType> rankLocalCellIDs = _mesh->cellIDsInPartition();
  IndexType numMyElems = rankLocalCellIDs.size();
  // variable-length arrays are a compiler extension; use std::vector instead
  std::vector<GlobalIndexType> myElems(numMyElems);
  // build cell index
  GlobalIndexType myCellOrdinal = 0;

  std::vector<double> rankLocalRieszNorms(numMyElems);
  
  for (set<GlobalIndexType>::iterator cellIDIt = rankLocalCellIDs.begin(); cellIDIt != rankLocalCellIDs.end(); cellIDIt++) {
    GlobalIndexType cellID = *cellIDIt;
    myElems[myCellOrdinal] = ordinalForCellID[cellID];
    rankLocalRieszNorms[myCellOrdinal] = _rieszRepNormSquared[cellID];
    myCellOrdinal++;
  }
  Epetra_Map normMap((GlobalIndexTypeToCast)numElems,(int)numMyElems,(GlobalIndexTypeToCast *)&myElems[0],(GlobalIndexTypeToCast)0,Comm);

  Epetra_Vector distributedRieszNorms(normMap);
  int err = distributedRieszNorms.ReplaceGlobalValues(numMyElems,&rankLocalRieszNorms[0],(GlobalIndexTypeToCast *)&myElems[0]);
  if (err != 0) {
    cout << "RieszRep::distributeDofs(): on rank " << myRank << ", ReplaceGlobalValues returned error code " << err << endl;
  }

  Epetra_Map normImportMap((GlobalIndexTypeToCast)numElems,(GlobalIndexTypeToCast)numElems,0,Comm);
  Epetra_Import normImporter(normImportMap,normMap); 
  Epetra_Vector globalNorms(normImportMap);
  globalNorms.Import(distributedRieszNorms, normImporter, Add);  // add should be OK (everything should be zeros)

  for (int cellOrdinal=0; cellOrdinal<cellIDsByPartitionOrdering.size(); cellOrdinal++) {
    GlobalIndexType cellID = cellIDsByPartitionOrdering[cellOrdinal];
    _rieszRepNormSquaredGlobal[cellID] = globalNorms[cellOrdinal];
//    if (myRank==0) cout << "_rieszRepNormSquaredGlobal[" << cellID << "] = " << globalNorms[cellOrdinal] << endl;
  }
 
}