Code example #1
void BlockCrsMatrix::ExtractBlock(Epetra_CrsMatrix & BaseMatrix, const long long Row, const long long Col)
{
  if(Epetra_CrsMatrix::RowMatrixRowMap().GlobalIndicesLongLong() && BaseMatrix.RowMatrixRowMap().GlobalIndicesLongLong())
    return TExtractBlock<long long>(BaseMatrix, Row, Col);
  else
    throw "EpetraExt::BlockCrsMatrix::ExtractBlock: Global indices not long long";
}
Code example #2
void BlockCrsMatrix::ExtractBlock(Epetra_CrsMatrix & BaseMatrix, const int Row, const int Col)
{
  if(Epetra_CrsMatrix::RowMatrixRowMap().GlobalIndicesInt() && BaseMatrix.RowMatrixRowMap().GlobalIndicesInt())
    return TExtractBlock<int>(BaseMatrix, Row, Col);
  else
    throw "EpetraExt::BlockCrsMatrix::ExtractBlock: Global indices not int";
}
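
The two overloads above are thin dispatchers: each checks at run time that the matrix maps really carry the requested global-index width before forwarding to a single templated implementation (TExtractBlock, shown next), and throws otherwise. Below is a minimal, self-contained sketch of the same overload-plus-template idiom; it is not EpetraExt code, and all names in it are hypothetical.

#include <iostream>
#include <stdexcept>

struct BlockMatrix {
  bool indices64;  // runtime flag, playing the role of GlobalIndicesLongLong()

  // Single template implementation, like TExtractBlock<int_type>.
  template <typename int_type>
  void TExtract(int_type Row) {
    std::cout << "extract row " << Row << " using "
              << sizeof(int_type) * 8 << "-bit global indices\n";
  }

  // Non-template overloads validate the index width, then dispatch.
  void Extract(int Row) {
    if (!indices64) return TExtract<int>(Row);
    throw std::runtime_error("Extract: global indices not int");
  }
  void Extract(long long Row) {
    if (indices64) return TExtract<long long>(Row);
    throw std::runtime_error("Extract: global indices not long long");
  }
};

int main() {
  BlockMatrix m{false};
  m.Extract(3);       // picks the int overload and succeeds
  // m.Extract(3LL);  // would throw: this matrix uses 32-bit indices
  return 0;
}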
Code example #3
template<typename int_type>
void BlockCrsMatrix::TExtractBlock(Epetra_CrsMatrix & BaseMatrix, const int_type Row, const int_type Col)
{
  std::vector<int_type>& RowIndices_ = TRowIndices<int_type>();
  std::vector< std::vector<int_type> >& RowStencil_ = TRowStencil<int_type>();
  int_type RowOffset = RowIndices_[(std::size_t)Row] * ROffset_;
  int_type ColOffset = (RowIndices_[(std::size_t)Row] + RowStencil_[(std::size_t)Row][(std::size_t)Col]) * COffset_;

//  const Epetra_CrsGraph & BaseGraph = BaseMatrix.Graph();
  const Epetra_BlockMap & BaseMap = BaseMatrix.RowMatrixRowMap();
  //const Epetra_BlockMap & BaseColMap = BaseMatrix.RowMatrixColMap();

  // This routine extracts the entries of BaseMatrix from the larger
  // BlockCrsMatrix. It performs the following operation on the global IDs,
  // row by row:
  //   BaseMatrix.val[i][j] = this->val[i+RowOffset][j+ColOffset]

  int MaxIndices = BaseMatrix.MaxNumEntries();
  std::vector<int_type> Indices(MaxIndices);
  std::vector<double> Values(MaxIndices);
  int NumIndices;
  int_type indx,icol;
  double* BlkValues;
  int *BlkIndices;
  int BlkNumIndices;
  int ierr=0;
  (void) ierr; // Forestall compiler warning for unused variable.

  for (int i=0; i<BaseMap.NumMyElements(); i++) {

    // Get pointers to values and indices of whole block matrix row
    int_type BaseRow = (int_type) BaseMap.GID64(i);
    int myBlkBaseRow = this->RowMatrixRowMap().LID(BaseRow + RowOffset);
    ierr = this->ExtractMyRowView(myBlkBaseRow, BlkNumIndices, BlkValues, BlkIndices); 

    NumIndices = 0;
    // Grab columns with global indices in correct range for this block
    for( int l = 0; l < BlkNumIndices; ++l ) {
       icol = (int_type) this->RowMatrixColMap().GID64(BlkIndices[l]);
       indx = icol - ColOffset;
       if (indx >= 0 && indx < COffset_) {
         Indices[NumIndices] = indx;
         Values[NumIndices] = BlkValues[l];
         NumIndices++;
       }
    }

    //Load this row into base matrix
    BaseMatrix.ReplaceGlobalValues(BaseRow, NumIndices, &Values[0], &Indices[0] );

  }
}
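
The offsets computed at the top of TExtractBlock are what locate one sub-block inside the global block matrix: RowIndices_ holds the block-row ids, RowStencil_ the block-column positions relative to the diagonal, and ROffset_/COffset_ the block dimensions. Here is a small standalone sketch of that arithmetic with made-up stencil data; the variable names mirror the routine above, but every number is hypothetical:

#include <cstdio>
#include <vector>

int main() {
  // Hypothetical structure: 4x4 blocks, block rows {0,1,2}, and block row 1
  // coupled to block columns {0,1,2} via the stencil {-1, 0, +1}.
  std::vector<long long> RowIndices = {0, 1, 2};
  std::vector<std::vector<long long> > RowStencil = {{0, 1}, {-1, 0, 1}, {-1, 0}};
  const long long ROffset = 4, COffset = 4;

  long long Row = 1, Col = 2;  // Col indexes the stencil, not a global column
  long long RowOffset = RowIndices[Row] * ROffset;
  long long ColOffset = (RowIndices[Row] + RowStencil[Row][Col]) * COffset;

  // Base entry (i,j) is read from global entry (i+RowOffset, j+ColOffset).
  std::printf("RowOffset = %lld, ColOffset = %lld\n", RowOffset, ColOffset);  // 4, 8
  return 0;
}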
Code example #4
int main(int argc, char *argv[])
{

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif

  // get the epetra matrix from the Gallery
  int nx = 8;
  int ny = 8 * Comm.NumProc();

  ParameterList GaleriList;
  GaleriList.set("nx", nx);
  GaleriList.set("ny", ny);
  GaleriList.set("mx", 1);
  GaleriList.set("my", Comm.NumProc());

  Epetra_Map* Map = CreateMap("Cartesian2D", Comm, GaleriList);
  Epetra_CrsMatrix* A = CreateCrsMatrix("Laplace2D", Map, GaleriList);

  Epetra_Vector LHS(*Map); LHS.Random();
  Epetra_Vector RHS(*Map); RHS.PutScalar(0.0);

  Epetra_LinearProblem Problem(A, &LHS, &RHS);

  AztecOO solver(Problem);

  Init();   // set up the MLAPI workspace

  try {

    Space S(-1, A->NumMyRows(), A->RowMatrixRowMap().MyGlobalElements());
    Operator A_MLAPI(S, S, A, false);

    Teuchos::ParameterList MLList;
    MLList.set("max levels",3);
    MLList.set("increasing or decreasing","increasing");
    MLList.set("aggregation: type", "Uncoupled");
    MLList.set("aggregation: damping factor", 0.0);
    MLList.set("smoother: type","symmetric Gauss-Seidel");
    MLList.set("smoother: sweeps",1);
    MLList.set("smoother: damping factor",1.0);
    MLList.set("coarse: max size",3);
    MLList.set("smoother: pre or post", "both");
    MLList.set("coarse: type","Amesos-KLU");

    MultiLevelSA Prec_MLAPI(A_MLAPI, MLList);

    Epetra_Operator* Prec = new EpetraBaseOperator(A->RowMatrixRowMap(), Prec_MLAPI);

    solver.SetPrecOperator(Prec);
    solver.SetAztecOption(AZ_solver, AZ_cg_condnum);
    solver.SetAztecOption(AZ_output, 32);
    solver.Iterate(500, 1e-12);

    // destroy the preconditioner
    delete Prec;

  }
  catch (const int e) {
    cerr << "Caught exception, code = " << e << endl;
  }
  catch (...) {
    cerr << "Caught exception..." << endl;
  }

  Finalize();

  // Since RHS = 0, the exact solution is zero, so the 2-norm of LHS
  // measures the error left in the computed solution.

  double residual;
  LHS.Norm2(&residual);

  if( Comm.MyPID()==0 ) {
    cout << "||b-Ax||_2 = " << residual << endl;
  }

  delete A;
  delete Map;

  // for testing purposes
  if (residual > 1e-5)
    exit(EXIT_FAILURE);

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return(EXIT_SUCCESS);
}
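
As noted in the example, the printed norm is really ||x||_2 of the computed solution, which doubles as an error measure because the right-hand side (and hence the exact solution) is zero. If the true residual were wanted instead, a fragment along these lines could be inserted before the deletes; this is a sketch that reuses A, LHS, RHS, Map and Comm from the example above rather than a standalone program:

  Epetra_Vector Ax(*Map);
  A->Multiply(false, LHS, Ax);   // Ax = A * x
  Ax.Update(1.0, RHS, -1.0);     // Ax = b - A*x
  double trueResidual;
  Ax.Norm2(&trueResidual);
  if (Comm.MyPID() == 0)
    cout << "||b-Ax||_2 = " << trueResidual << endl;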
Code example #5
File: GMGSolver.cpp Project: Kun-Qu/Camellia
int GMGSolver::solve() {
  int rank = Teuchos::GlobalMPISession::getRank();
  
  // in place of doing the scaling ourselves, for the moment I've switched
  // over to using Aztec's built-in scaling.  This appears to be functionally identical.
  bool useAztecToScaleDiagonally = true;
  
  AztecOO solver(problem());
  
  Epetra_CrsMatrix *A = dynamic_cast<Epetra_CrsMatrix *>( problem().GetMatrix() );
  
  if (A == NULL) {
    cout << "Error: GMGSolver requires an Epetra_CrsMatrix.\n";
    TEUCHOS_TEST_FOR_EXCEPTION(true, std::invalid_argument, "Error: GMGSolver requires an Epetra_CrsMatrix.\n");
  }
  
//  EpetraExt::RowMatrixToMatlabFile("/tmp/A_pre_scaling.dat",*A);

//  Epetra_MultiVector *b = problem().GetRHS();
//  EpetraExt::MultiVectorToMatlabFile("/tmp/b_pre_scaling.dat",*b);

//  Epetra_MultiVector *x = problem().GetLHS();
//  EpetraExt::MultiVectorToMatlabFile("/tmp/x_initial_guess.dat",*x);
  
  const Epetra_Map* map = &A->RowMatrixRowMap();
  
  Epetra_Vector diagA(*map);
  A->ExtractDiagonalCopy(diagA);

//  EpetraExt::MultiVectorToMatlabFile("/tmp/diagA.dat",diagA);
//  
  Epetra_Vector scale_vector(*map);
  Epetra_Vector diagA_sqrt_inv(*map);
  Epetra_Vector diagA_inv(*map);
  
  if (_diagonalScaling && !useAztecToScaleDiagonally) {
    int length = scale_vector.MyLength();
    for (int i=0; i<length; i++) scale_vector[i] = 1.0 / sqrt(fabs(diagA[i]));

    problem().LeftScale(scale_vector);
    problem().RightScale(scale_vector);
  }
  
  Teuchos::RCP<Epetra_MultiVector> diagA_ptr = Teuchos::rcp( &diagA, false );

  _gmgOperator.setStiffnessDiagonal(diagA_ptr);
  
  _gmgOperator.setApplyDiagonalSmoothing(_diagonalSmoothing);
  _gmgOperator.setFineSolverUsesDiagonalScaling(_diagonalScaling);
  
  _gmgOperator.computeCoarseStiffnessMatrix(A);

  if (_diagonalScaling && useAztecToScaleDiagonally) {
    solver.SetAztecOption(AZ_scaling, AZ_sym_diag);
  } else {
    solver.SetAztecOption(AZ_scaling, AZ_none);
  }
  if (_useCG) {
    if (_computeCondest) {
      solver.SetAztecOption(AZ_solver, AZ_cg_condnum);
    } else {
      solver.SetAztecOption(AZ_solver, AZ_cg);
    }
  } else {
    solver.SetAztecOption(AZ_kspace, 200); // default is 30
    if (_computeCondest) {
      solver.SetAztecOption(AZ_solver, AZ_gmres_condnum);
    } else {
      solver.SetAztecOption(AZ_solver, AZ_gmres);
    }
  }
  
  solver.SetPrecOperator(&_gmgOperator);
//  solver.SetAztecOption(AZ_precond, AZ_none);
  solver.SetAztecOption(AZ_precond, AZ_user_precond);
  solver.SetAztecOption(AZ_conv, _azConvergenceOption);
//  solver.SetAztecOption(AZ_output, AZ_last);
  solver.SetAztecOption(AZ_output, _azOutput);
  
  int solveResult = solver.Iterate(_maxIters,_tol);
  
  const double* status = solver.GetAztecStatus();
  int remainingIters = _maxIters;

  int whyTerminated = (int) status[AZ_why];
  int maxRestarts = 1;
  int numRestarts = 0;
  while ((whyTerminated==AZ_loss) && (numRestarts < maxRestarts)) {
    remainingIters -= (int) status[AZ_its];
    if (rank==0) cout << "Aztec warned that the recursive residual indicates convergence even though the true residual is too large.  Restarting with the new solution as initial guess, with maxIters = " << remainingIters << endl;
    solveResult = solver.Iterate(remainingIters,_tol);
    whyTerminated = (int) status[AZ_why];
    numRestarts++;
  }
  remainingIters -= (int) status[AZ_its];
  _iterationCount = _maxIters - remainingIters;
  _condest = solver.Condest(); // will be -1 if running without condest
  
  if (rank==0) {
    switch (whyTerminated) {
      case AZ_normal:
//        cout << "whyTerminated: AZ_normal " << endl;
        break;
      case AZ_param:
        cout << "whyTerminated: AZ_param " << endl;
        break;
      case AZ_breakdown:
        cout << "whyTerminated: AZ_breakdown " << endl;
        break;
      case AZ_loss:
        cout << "whyTerminated: AZ_loss " << endl;
        break;
      case AZ_ill_cond:
        cout << "whyTerminated: AZ_ill_cond " << endl;
        break;
      case AZ_maxits:
        cout << "whyTerminated: AZ_maxits " << endl;
        break;
      default:
        break;
    }
  }
  
//  Epetra_MultiVector *x = problem().GetLHS();
//  EpetraExt::MultiVectorToMatlabFile("/tmp/x.dat",*x);
  
  if (_diagonalScaling && !useAztecToScaleDiagonally) {
    // reverse the scaling here
    scale_vector.Reciprocal(scale_vector);
    problem().LeftScale(scale_vector);
    problem().RightScale(scale_vector);
  }

  double norminf = A->NormInf();
  double normone = A->NormOne();
  
  int numIters = solver.NumIters();
  
  if (_printToConsole) {
    cout << "\n Inf-norm of stiffness matrix after scaling = " << norminf;
    cout << "\n One-norm of stiffness matrix after scaling = " << normone << endl << endl;
    cout << "Num iterations: " << numIters << endl;
  }
  
  _gmgOperator.setStiffnessDiagonal(Teuchos::rcp((Epetra_MultiVector*) NULL ));
  
  return solveResult;
}
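
The manual-scaling branch above relies on a standard identity: with S = diag(1/sqrt(|d_i|)) built from the matrix diagonal, LeftScale and RightScale turn A x = b into (S A S) y = S b with y = S^-1 x, which is why the scale vector is inverted with Reciprocal() and applied again after the solve. A tiny standalone check of that identity with made-up numbers (nothing here comes from GMGSolver):

#include <cmath>
#include <cstdio>

int main() {
  // Hypothetical 2x2 SPD system A x = b.
  double A[2][2] = {{4.0, 1.0}, {1.0, 9.0}};
  double x[2] = {1.0, 2.0};
  double b[2] = {A[0][0]*x[0] + A[0][1]*x[1], A[1][0]*x[0] + A[1][1]*x[1]};
  double s[2] = {1.0/std::sqrt(A[0][0]), 1.0/std::sqrt(A[1][1])};  // S = diag(s)

  // Row i of (S A S)(S^-1 x) should equal row i of S b.
  for (int i = 0; i < 2; ++i) {
    double lhs = 0.0;
    for (int j = 0; j < 2; ++j)
      lhs += (s[i] * A[i][j] * s[j]) * (x[j] / s[j]);
    std::printf("row %d: %g == %g\n", i, lhs, s[i] * b[i]);
  }
  return 0;
}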
Code example #6
File: BlockCheby.cpp Project: haripandey/trilinos
int main(int argc, char *argv[]) {

#ifdef HAVE_MPI
  MPI_Init(&argc, &argv);
  Epetra_MpiComm Comm(MPI_COMM_WORLD);
#else
  Epetra_SerialComm Comm;
#endif
  
  // initialize the random number generator
  int ml_one = 1;
  ML_srandom1(&ml_one);
  // ===================== //
  // create linear problem //
  // ===================== //
  Epetra_CrsMatrix *BadMatrix;
  int * i_blockids;
  int numblocks;

  EpetraExt::MatlabFileToCrsMatrix("samplemat.dat",Comm,BadMatrix);
  const Epetra_Map *Map=&BadMatrix->RowMap();

  // Read in the block GLOBAL ids
  Epetra_Vector* d_blockids;
  int rv=EpetraExt::MatrixMarketFileToVector("blockids.dat",*Map,d_blockids);
  i_blockids=new int[d_blockids->MyLength()];
  numblocks=-1;
  for(int i=0;i<d_blockids->MyLength();i++){
    i_blockids[i]=(int)(*d_blockids)[i]-1;
    numblocks=MAX(numblocks,i_blockids[i]);
  }
  numblocks++;

  BadMatrix->FillComplete();  BadMatrix->OptimizeStorage();
  int N=BadMatrix->RowMatrixRowMap().NumMyElements();
  
  // Create the trivial blockID list
  int * trivial_blockids=new int[N];
  for(int i=0;i<N;i++)
    trivial_blockids[i]=i;    
  
  Epetra_Vector LHS(*Map); 
  Epetra_Vector RHS(*Map);
  Epetra_LinearProblem BadProblem(BadMatrix, &LHS, &RHS);

  Teuchos::ParameterList MLList;
  double TotalErrorResidual = 0.0, TotalErrorExactSol = 0.0;
  char mystring[80];

  // ====================== //
  // ML Cheby 
  // ====================== //
  if (Comm.MyPID() == 0) PrintLine();
  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: type","Chebyshev");
  MLList.set("coarse: type","Amesos-KLU");    
  MLList.set("max levels",2);
  MLList.set("aggregation: threshold",.02);
  MLList.set("ML output",10);
  MLList.set("smoother: polynomial order",2);  
  strcpy(mystring,"Cheby");
  TestMultiLevelPreconditioner(mystring, MLList, BadProblem,
                               TotalErrorResidual, TotalErrorExactSol);

  // These tests only work in serial due to how the block id's are written.
  if(Comm.NumProc()==1){
    // ====================== //
    // ML Block Cheby (Trivial)
    // ====================== //
    if (Comm.MyPID() == 0) PrintLine(); 
    ML_Epetra::SetDefaults("SA",MLList); 
    MLList.set("smoother: type","Block Chebyshev");
    MLList.set("smoother: Block Chebyshev number of blocks",N);
    MLList.set("smoother: Block Chebyshev block list",trivial_blockids);
    MLList.set("coarse: type","Amesos-KLU");  
    MLList.set("max levels",2);
    MLList.set("ML output",10);  
    MLList.set("smoother: polynomial order",2);
    strcpy(mystring,"ML Block Cheby (Trivial)");
    TestMultiLevelPreconditioner(mystring, MLList, BadProblem,
                                 TotalErrorResidual, TotalErrorExactSol);
  
  
    // ====================== //
    // ML Block Cheby (Smart)
    // ====================== //  
    if (Comm.MyPID() == 0) PrintLine();
    ML_Epetra::SetDefaults("SA",MLList);  
    MLList.set("smoother: type","Block Chebyshev");
    MLList.set("smoother: Block Chebyshev number of blocks",numblocks);
    MLList.set("smoother: Block Chebyshev block list",i_blockids);    
    MLList.set("coarse: type","Amesos-KLU");  
    MLList.set("max levels",2);
    MLList.set("ML output",10);  
    MLList.set("smoother: polynomial order",2);
    strcpy(mystring,"ML Block Cheby (Smart)");
    TestMultiLevelPreconditioner(mystring, MLList, BadProblem,
                                 TotalErrorResidual, TotalErrorExactSol);
  }
  
#if defined(HAVE_ML_IFPACK)
  // ====================== //
  // IFPACK Cheby 
  // ====================== //
  if (Comm.MyPID() == 0) PrintLine();
  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: type","IFPACK-Chebyshev");
  MLList.set("coarse: type","Amesos-KLU");    
  MLList.set("max levels",2);
  MLList.set("ML output",10);
  MLList.set("smoother: polynomial order",2);  
  strcpy(mystring,"IFPACK Cheby");
  TestMultiLevelPreconditioner(mystring, MLList, BadProblem,
                               TotalErrorResidual, TotalErrorExactSol);

  // ====================== //
  // IFPACK Block Cheby (Trivial)
  // ====================== //  
  int NumBlocks=Map->NumMyElements();
  int *BlockStarts=new int[NumBlocks+1];
  int *Blockids=new int [NumBlocks];
  for(int i=0;i<NumBlocks;i++){
    BlockStarts[i]=i;
    Blockids[i]=Map->GID(i);
  }
  BlockStarts[NumBlocks]=NumBlocks;
  if (Comm.MyPID() == 0) PrintLine();
  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: type","IFPACK-Block Chebyshev");
  MLList.set("smoother: Block Chebyshev number of blocks",NumBlocks);
  MLList.set("smoother: Block Chebyshev block starts",&BlockStarts[0]);
  MLList.set("smoother: Block Chebyshev block list",&Blockids[0]);    
  MLList.set("coarse: type","Amesos-KLU");  
  MLList.set("max levels",2);
  MLList.set("ML output",10);  
  MLList.set("smoother: polynomial order",2);
  strcpy(mystring,"IFPACK Block Cheby (Trivial)");
  TestMultiLevelPreconditioner(mystring, MLList, BadProblem,
                               TotalErrorResidual, TotalErrorExactSol);
  delete [] BlockStarts; delete [] Blockids;

  
  // ====================== //
  // IFPACK Block Cheby (Smart)
  // ====================== //
  // Figure out how many blocks we actually have and build a map...
  Epetra_Map* IfpackMap;
  int g_NumBlocks=-1,g_MaxSize=-1;
  if(Comm.MyPID() == 0){
    const int lineLength = 1025;
    char line[lineLength];

    FILE * f=fopen("localids_in_blocks.dat","r");
    assert(f!=0);

    // Next, strip off header lines (which start with "%")
    do {
      if(fgets(line, lineLength,f)==0) return(-4);
    } while (line[0] == '%');

    // Grab the number we actually care about
    sscanf(line, "%d %d", &g_NumBlocks, &g_MaxSize);
    fclose(f);
  }
  Comm.Broadcast(&g_NumBlocks,1,0);
  Comm.Broadcast(&g_MaxSize,1,0);
  Epetra_Map BlockMap(g_NumBlocks,0,Comm);   
  Epetra_MultiVector *blockids_disk=0;
  rv=EpetraExt::MatrixMarketFileToMultiVector("localids_in_blocks.dat",BlockMap,blockids_disk);

  // Put all the block info into the right place
  NumBlocks=BlockMap.NumMyElements();
  BlockStarts=new int[NumBlocks+1];
  Blockids= new int[g_MaxSize*NumBlocks];
  // NTS: Blockids is overallocated because I don't want to write a counting loop
  int i,cidx;
  for(i=0,cidx=0;i<NumBlocks;i++){
    BlockStarts[i]=cidx;
    Blockids[cidx]=(int)(*blockids_disk)[0][i];cidx++;
    if((*blockids_disk)[1][i] > 1e-2){
      Blockids[cidx]=(int)(*blockids_disk)[1][i];cidx++;
    }    
  }
  BlockStarts[NumBlocks]=cidx;
  
  
  if (Comm.MyPID() == 0) PrintLine();
  ML_Epetra::SetDefaults("SA",MLList);
  MLList.set("smoother: type","IFPACK-Block Chebyshev");
  MLList.set("smoother: Block Chebyshev number of blocks",NumBlocks);
  MLList.set("smoother: Block Chebyshev block starts",&BlockStarts[0]);
  MLList.set("smoother: Block Chebyshev block list",&Blockids[0]);    
  MLList.set("coarse: type","Amesos-KLU");  
  MLList.set("max levels",2);
  MLList.set("ML output",10);  
  MLList.set("smoother: polynomial order",2);
  strcpy(mystring,"IFPACK Block Cheby (Smart)");
  TestMultiLevelPreconditioner(mystring, MLList, BadProblem,
                               TotalErrorResidual, TotalErrorExactSol);
  
  delete blockids_disk; delete [] BlockStarts; delete [] Blockids;
#endif

  // ===================== //
  // print out total error //
  // ===================== //

  if (Comm.MyPID() == 0) {
    cout << endl;
    cout << "......Total error for residual        = " << TotalErrorResidual << endl;
    cout << "......Total error for exact solution  = " << TotalErrorExactSol << endl;
    cout << endl;
  }

  delete [] i_blockids;
  delete [] trivial_blockids;
  delete BadMatrix;
  delete d_blockids;
  if (TotalErrorResidual > 1e-5) {
    cerr << "Error: `BlockCheby.exe' failed!" << endl;
    exit(EXIT_FAILURE);
  }

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  if (Comm.MyPID() == 0)
    cerr << "`BlockCheby.exe' passed!" << endl;

  return (EXIT_SUCCESS);
}
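
The IFPACK block smoothers configured above receive their partition in a CSR-like layout: BlockStarts has NumBlocks+1 entries, and entries BlockStarts[k] through BlockStarts[k+1]-1 of the flat Blockids array are the members of block k. Below is a tiny standalone sketch of walking that layout; the ids are invented, not read from BlockCheby.cpp's data files:

#include <cstdio>

int main() {
  // Three hypothetical blocks holding local ids {0,1}, {2}, {3,4,5}.
  const int NumBlocks = 3;
  int BlockStarts[] = {0, 2, 3, 6};        // NumBlocks+1 entries
  int Blockids[]    = {0, 1, 2, 3, 4, 5};  // flat list of block members

  for (int k = 0; k < NumBlocks; ++k) {
    std::printf("block %d:", k);
    for (int j = BlockStarts[k]; j < BlockStarts[k + 1]; ++j)
      std::printf(" %d", Blockids[j]);
    std::printf("\n");
  }
  return 0;
}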
Code example #7
int Amesos_TestSolver( Epetra_Comm &Comm, char *matrix_file, 
		       SparseSolverType SparseSolver,
		       bool transpose, 
		       int special, AMESOS_MatrixType matrix_type ) {


  Epetra_Map * readMap;
  Epetra_CrsMatrix * readA; 
  Epetra_Vector * readx; 
  Epetra_Vector * readb;
  Epetra_Vector * readxexact;
   
  std::string FileName = matrix_file ;
  int FN_Size = FileName.size() ; 
  std::string LastFiveBytes = FileName.substr( EPETRA_MAX(0,FN_Size-5), FN_Size );
  std::string LastFourBytes = FileName.substr( EPETRA_MAX(0,FN_Size-4), FN_Size );
  bool NonContiguousMap = false; 

  if ( LastFiveBytes == ".triU" ) { 
    // Call routine to read in unsymmetric Triplet matrix
    NonContiguousMap = true; 
    EPETRA_CHK_ERR( Trilinos_Util_ReadTriples2Epetra( matrix_file, false, Comm, readMap, readA, readx, 
						      readb, readxexact, NonContiguousMap ) );
  } else {
    if ( LastFiveBytes == ".triS" ) { 
      NonContiguousMap = true; 
      // Call routine to read in symmetric Triplet matrix
      EPETRA_CHK_ERR( Trilinos_Util_ReadTriples2Epetra( matrix_file, true, Comm, readMap, readA, readx, 
							readb, readxexact, NonContiguousMap ) );
    } else {
      if (  LastFourBytes == ".mtx" ) { 
	EPETRA_CHK_ERR( Trilinos_Util_ReadMatrixMarket2Epetra( matrix_file, Comm, readMap, 
							       readA, readx, readb, readxexact) );
      } else {
	// Call routine to read in HB problem
	Trilinos_Util_ReadHb2Epetra( matrix_file, Comm, readMap, readA, readx, 
						     readb, readxexact) ;
      }
    }
  }

  Epetra_CrsMatrix transposeA(Copy, *readMap, 0);
  Epetra_CrsMatrix *serialA ; 

  if ( transpose ) {
    assert( CrsMatrixTranspose( readA, &transposeA ) == 0 ); 
    serialA = &transposeA ; 
  } else {
    serialA = readA ; 
  }

  Epetra_RowMatrix * passA = 0; 
  Epetra_Vector * passx = 0; 
  Epetra_Vector * passb = 0;
  Epetra_Vector * passxexact = 0;
  Epetra_Vector * passresid = 0;
  Epetra_Vector * passtmp = 0;

  // Create uniform distributed map
  Epetra_Map map(readMap->NumGlobalElements(), 0, Comm);
  Epetra_Map* map_;

  if( NonContiguousMap ) {
    //
    //  map gives us NumMyElements and MyFirstElement;
    //
    int NumGlobalElements =  readMap->NumGlobalElements();
    int NumMyElements = map.NumMyElements();
    int MyFirstElement = map.MinMyGID();
    std::vector<int> MapMap_( NumGlobalElements );
    readMap->MyGlobalElements( &MapMap_[0] ) ;
    Comm.Broadcast( &MapMap_[0], NumGlobalElements, 0 ) ; 
    map_ = new Epetra_Map( NumGlobalElements, NumMyElements, &MapMap_[MyFirstElement], 0, Comm);
  } else {
    map_ = new Epetra_Map( map ) ; 
  }


  Epetra_CrsMatrix A(Copy, *map_, 0);


  const Epetra_Map &OriginalMap = serialA->RowMatrixRowMap() ; 
  assert( OriginalMap.SameAs(*readMap) ); 
  Epetra_Export exporter(OriginalMap, *map_);
  Epetra_Export exporter2(OriginalMap, *map_);
  Epetra_Export MatrixExporter(OriginalMap, *map_);
  Epetra_CrsMatrix AwithDiag(Copy, *map_, 0);

  Epetra_Vector x(*map_);
  Epetra_Vector b(*map_);
  Epetra_Vector xexact(*map_);
  Epetra_Vector resid(*map_);
  Epetra_Vector readresid(*readMap);
  Epetra_Vector tmp(*map_);
  Epetra_Vector readtmp(*readMap);

  //  Epetra_Vector xcomp(*map_);      // X as computed by the solver
  bool distribute_matrix = ( matrix_type == AMESOS_Distributed ) ; 
  if ( distribute_matrix ) { 
    // Create Exporter to distribute read-in matrix and vectors
    //
    //  Initialize x, b and xexact to the values read in from the file
    //
    x.Export(*readx, exporter, Add);
    b.Export(*readb, exporter, Add);
    xexact.Export(*readxexact, exporter, Add);
    Comm.Barrier();
    
    A.Export(*serialA, exporter, Add);
    assert(A.FillComplete()==0);    
    
    Comm.Barrier();

    passA = &A; 

    passx = &x; 
    passb = &b;
    passxexact = &xexact;
    passresid = &resid;
    passtmp = &tmp;

  } else { 

    passA = serialA; 
    passx = readx; 
    passb = readb;
    passxexact = readxexact;
    passresid = &readresid;
    passtmp = &readtmp;
  }

  Epetra_MultiVector CopyB( *passb ) ;


  double Anorm = passA->NormInf() ; 
  SparseDirectTimingVars::SS_Result.Set_Anorm(Anorm) ;

  Epetra_LinearProblem Problem(  (Epetra_RowMatrix *) passA, 
				 (Epetra_MultiVector *) passx, 
				 (Epetra_MultiVector *) passb );
  

  for ( int i = 0; i < 1+special ; i++ ) { 
    Epetra_Time TotalTime( Comm ) ; 
    
    if ( false ) { 
      //  TEST_UMFPACK is never set by configure
#ifdef HAVE_AMESOS_SUPERLUDIST
    } else if ( SparseSolver == SUPERLUDIST ) {
      Teuchos::ParameterList ParamList ;
      ParamList.set( "MaxProcs", -3 );
      Amesos_Superludist A_Superludist( Problem ) ;

      //ParamList.set( "Redistribute", true );
      //ParamList.set( "AddZeroToDiag", true );
      Teuchos::ParameterList& SuperludistParams = ParamList.sublist("Superludist") ;
      (void) SuperludistParams; // forestall compiler warning for unused variable

      EPETRA_CHK_ERR( A_Superludist.SetParameters( ParamList ) );
      EPETRA_CHK_ERR( A_Superludist.SetUseTranspose( transpose ) );
      EPETRA_CHK_ERR( A_Superludist.SymbolicFactorization(  ) );
      EPETRA_CHK_ERR( A_Superludist.NumericFactorization(  ) );
      EPETRA_CHK_ERR( A_Superludist.Solve(  ) );
#endif
#ifdef HAVE_AMESOS_DSCPACK
    } else if ( SparseSolver == DSCPACK ) {
      
      Teuchos::ParameterList ParamList ;
      ParamList.set( "MaxProcs", -3 );

      Amesos_Dscpack A_dscpack( Problem ) ; 
      EPETRA_CHK_ERR( A_dscpack.SetParameters( ParamList ) ); 
      EPETRA_CHK_ERR( A_dscpack.SymbolicFactorization(  ) ); 
      EPETRA_CHK_ERR( A_dscpack.NumericFactorization(  ) ); 
      EPETRA_CHK_ERR( A_dscpack.Solve(  ) ); 
#endif
#ifdef HAVE_AMESOS_SCALAPACK
    } else if ( SparseSolver == SCALAPACK ) {

      Teuchos::ParameterList ParamList ;
      Amesos_Scalapack A_scalapack( Problem ) ; 
      ParamList.set( "MaxProcs", -3 );
      EPETRA_CHK_ERR( A_scalapack.SetParameters( ParamList ) ); 
      EPETRA_CHK_ERR( A_scalapack.SetUseTranspose( transpose ) ); 
      EPETRA_CHK_ERR( A_scalapack.SymbolicFactorization(  ) ); 
      EPETRA_CHK_ERR( A_scalapack.NumericFactorization(  ) ); 
      EPETRA_CHK_ERR( A_scalapack.Solve(  ) ); 

#endif
#ifdef HAVE_AMESOS_TAUCS
    } else if ( SparseSolver == TAUCS ) {

      Teuchos::ParameterList ParamList ;
      Amesos_Taucs A_taucs( Problem ) ; 
      ParamList.set( "MaxProcs", -3 );
      EPETRA_CHK_ERR( A_taucs.SetParameters( ParamList ) ); 
      EPETRA_CHK_ERR( A_taucs.SetUseTranspose( transpose ) ); 
      EPETRA_CHK_ERR( A_taucs.SymbolicFactorization(  ) ); 
      EPETRA_CHK_ERR( A_taucs.NumericFactorization(  ) ); 
      EPETRA_CHK_ERR( A_taucs.Solve(  ) ); 

#endif
#ifdef HAVE_AMESOS_PARDISO
    } else if ( SparseSolver == PARDISO ) {

      Teuchos::ParameterList ParamList ;
      Amesos_Pardiso A_pardiso( Problem ) ; 
      ParamList.set( "MaxProcs", -3 );
      EPETRA_CHK_ERR( A_pardiso.SetParameters( ParamList ) ); 
      EPETRA_CHK_ERR( A_pardiso.SetUseTranspose( transpose ) ); 
      EPETRA_CHK_ERR( A_pardiso.SymbolicFactorization(  ) ); 
      EPETRA_CHK_ERR( A_pardiso.NumericFactorization(  ) ); 
      EPETRA_CHK_ERR( A_pardiso.Solve(  ) ); 

#endif
#ifdef HAVE_AMESOS_PARAKLETE
    } else if ( SparseSolver == PARAKLETE ) {

      Teuchos::ParameterList ParamList ;
      Amesos_Paraklete A_paraklete( Problem ) ; 
      ParamList.set( "MaxProcs", -3 );
      EPETRA_CHK_ERR( A_paraklete.SetParameters( ParamList ) ); 
      EPETRA_CHK_ERR( A_paraklete.SetUseTranspose( transpose ) ); 
      EPETRA_CHK_ERR( A_paraklete.SymbolicFactorization(  ) ); 
      EPETRA_CHK_ERR( A_paraklete.NumericFactorization(  ) ); 
      EPETRA_CHK_ERR( A_paraklete.Solve(  ) ); 

#endif
#ifdef HAVE_AMESOS_MUMPS
    } else if ( SparseSolver == MUMPS ) {

      Teuchos::ParameterList ParamList ;
      Amesos_Mumps A_mumps( Problem ) ; 
      ParamList.set( "MaxProcs", -3 );
      EPETRA_CHK_ERR( A_mumps.SetParameters( ParamList ) ); 
      EPETRA_CHK_ERR( A_mumps.SetUseTranspose( transpose ) ); 
      EPETRA_CHK_ERR( A_mumps.SymbolicFactorization(  ) ); 
      EPETRA_CHK_ERR( A_mumps.NumericFactorization(  ) ); 
      EPETRA_CHK_ERR( A_mumps.Solve(  ) ); 

#endif
#ifdef HAVE_AMESOS_SUPERLU
    } else if ( SparseSolver == SUPERLU ) {

      Teuchos::ParameterList ParamList ;
      Amesos_Superlu A_superlu( Problem ) ; 
      ParamList.set( "MaxProcs", -3 );
      EPETRA_CHK_ERR( A_superlu.SetParameters( ParamList ) ); 
      EPETRA_CHK_ERR( A_superlu.SetUseTranspose( transpose ) ); 
      EPETRA_CHK_ERR( A_superlu.SymbolicFactorization(  ) ); 
      EPETRA_CHK_ERR( A_superlu.NumericFactorization(  ) ); 
      EPETRA_CHK_ERR( A_superlu.Solve(  ) ); 

#endif
#ifdef HAVE_AMESOS_LAPACK
    } else if ( SparseSolver == LAPACK ) {

      Teuchos::ParameterList ParamList ;
      Amesos_Lapack A_lapack( Problem ) ; 
      ParamList.set( "MaxProcs", -3 );
      EPETRA_CHK_ERR( A_lapack.SetParameters( ParamList ) ); 
      EPETRA_CHK_ERR( A_lapack.SetUseTranspose( transpose ) ); 
      EPETRA_CHK_ERR( A_lapack.SymbolicFactorization(  ) ); 
      EPETRA_CHK_ERR( A_lapack.NumericFactorization(  ) ); 
      EPETRA_CHK_ERR( A_lapack.Solve(  ) ); 
#endif
#ifdef HAVE_AMESOS_UMFPACK
    } else if ( SparseSolver == UMFPACK ) {

      Teuchos::ParameterList ParamList ;
      Amesos_Umfpack A_umfpack( Problem ) ; 
      ParamList.set( "MaxProcs", -3 );
      EPETRA_CHK_ERR( A_umfpack.SetParameters( ParamList ) ); 
      EPETRA_CHK_ERR( A_umfpack.SetUseTranspose( transpose ) ); 
      EPETRA_CHK_ERR( A_umfpack.SymbolicFactorization(  ) ); 
      EPETRA_CHK_ERR( A_umfpack.NumericFactorization(  ) ); 
      EPETRA_CHK_ERR( A_umfpack.Solve(  ) ); 
#endif
#ifdef HAVE_AMESOS_KLU
    } else if ( SparseSolver == KLU ) {


      using namespace Teuchos;

      Amesos_Time AT; 
      int setupTimePtr = -1, symTimePtr = -1, numTimePtr = -1, refacTimePtr = -1, solveTimePtr = -1;
      AT.CreateTimer(Comm, 2);
      AT.ResetTimer(0);

      Teuchos::ParameterList ParamList ;
      // ParamList.set("OutputLevel",2);
      Amesos_Klu A_klu( Problem ); 
      ParamList.set( "MaxProcs", -3 );
      ParamList.set( "TrustMe", false );
      // ParamList.set( "Refactorize", true );
      EPETRA_CHK_ERR( A_klu.SetParameters( ParamList ) ) ; 
      EPETRA_CHK_ERR( A_klu.SetUseTranspose( transpose ) ); 
      setupTimePtr = AT.AddTime("Setup", setupTimePtr, 0);
      EPETRA_CHK_ERR( A_klu.SymbolicFactorization(  ) ); 
      symTimePtr = AT.AddTime("Symbolic", symTimePtr, 0);
      EPETRA_CHK_ERR( A_klu.NumericFactorization(  ) ); 
      numTimePtr = AT.AddTime("Numeric", numTimePtr, 0);
      EPETRA_CHK_ERR( A_klu.NumericFactorization(  ) );  // second call, timed as a refactorization
      refacTimePtr = AT.AddTime("Refactor", refacTimePtr, 0);
      // for ( int i=0; i<100000 ; i++ ) 
      EPETRA_CHK_ERR( A_klu.Solve(  ) ); 
      solveTimePtr = AT.AddTime("Solve", solveTimePtr, 0);

      double SetupTime = AT.GetTime(setupTimePtr);
      double SymbolicTime = AT.GetTime(symTimePtr);
      double NumericTime = AT.GetTime(numTimePtr);
      double RefactorTime = AT.GetTime(refacTimePtr);
      double SolveTime = AT.GetTime(solveTimePtr);

      std::cout << __FILE__ << "::"  << __LINE__ << " SetupTime = " << SetupTime << std::endl ; 
      std::cout << __FILE__ << "::"  << __LINE__ << " SymbolicTime = " << SymbolicTime - SetupTime << std::endl ; 
      std::cout << __FILE__ << "::"  << __LINE__ << " NumericTime = " << NumericTime - SymbolicTime<< std::endl ; 
      std::cout << __FILE__ << "::"  << __LINE__ << " RefactorTime = " << RefactorTime - NumericTime << std::endl ; 
      std::cout << __FILE__ << "::"  << __LINE__ << " SolveTime = " << SolveTime - RefactorTime << std::endl ; 

#endif
    } else { 
      SparseDirectTimingVars::log_file << "Solver not implemented yet" << std::endl ;
      std::cerr << "\n\n####################  Requested solver not available on this platform ##################### ATS\n" << std::endl ;
      std::cout << " SparseSolver = " << SparseSolver << std::endl ; 
      std::cerr << " SparseSolver = " << SparseSolver << std::endl ; 
    }
    
    SparseDirectTimingVars::SS_Result.Set_Total_Time( TotalTime.ElapsedTime() ); 
  }  // end for (int i = 0; i < 1+special; i++)

  //
  //  Compute the error = norm(xcomp - xexact )
  //
  double error;
  passresid->Update(1.0, *passx, -1.0, *passxexact, 0.0);

  passresid->Norm2(&error);
  SparseDirectTimingVars::SS_Result.Set_Error(error) ;

  //  passxexact->Norm2(&error ) ; 
  //  passx->Norm2(&error ) ; 

  //
  //  Compute the residual = norm(Ax - b)
  //
  double residual ; 

  passA->Multiply( transpose, *passx, *passtmp);
  passresid->Update(1.0, *passtmp, -1.0, *passb, 0.0); 
  //  passresid->Update(1.0, *passtmp, -1.0, CopyB, 0.0); 
  passresid->Norm2(&residual);

  SparseDirectTimingVars::SS_Result.Set_Residual(residual) ;
    
  double bnorm; 
  passb->Norm2( &bnorm ) ; 
  SparseDirectTimingVars::SS_Result.Set_Bnorm(bnorm) ;

  double xnorm; 
  passx->Norm2( &xnorm ) ; 
  SparseDirectTimingVars::SS_Result.Set_Xnorm(xnorm) ;

  delete readA;
  delete readx;
  delete readb;
  delete readxexact;
  delete readMap;
  delete map_;
  
  Comm.Barrier();

  return 0;
}
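
Every solver branch above repeats the same SetParameters / SetUseTranspose / SymbolicFactorization / NumericFactorization / Solve sequence on a concrete class. The Amesos factory exposes that life cycle behind one interface; as a sketch, the KLU branch could be written this way, reusing Problem and transpose from the function above (Create returns 0 when the requested solver was not compiled in):

  Amesos Factory;
  Amesos_BaseSolver* Solver = Factory.Create("Amesos_Klu", Problem);
  if (Solver != 0) {
    Teuchos::ParameterList ParamList;
    ParamList.set("MaxProcs", -3);
    EPETRA_CHK_ERR( Solver->SetParameters(ParamList) );
    EPETRA_CHK_ERR( Solver->SetUseTranspose(transpose) );
    EPETRA_CHK_ERR( Solver->SymbolicFactorization() );
    EPETRA_CHK_ERR( Solver->NumericFactorization() );
    EPETRA_CHK_ERR( Solver->Solve() );
    delete Solver;
  }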