// ====================================================================== 
void Finalize() 
{
  if (ML_Comm_) {
    ML_Comm_Destroy(&ML_Comm_);
    ML_Comm_ = 0;
  }

  if (Epetra_Comm_) {
    delete Epetra_Comm_;
    Epetra_Comm_ = 0;
  }
}
// ======================================================================
// Destroys all structures allocated in \c ComputePreconditioner() if the preconditioner has been computed.
int ML_Epetra::FaceMatrixFreePreconditioner::DestroyPreconditioner(){
  if (ml_comm_) { ML_Comm_Destroy(&ml_comm_); ml_comm_ = 0; } // ML communicator created for this preconditioner
  if (Prolongator_) {delete Prolongator_; Prolongator_=0;}
  if (InvDiagonal_) {delete InvDiagonal_; InvDiagonal_=0;}
  if (CoarsePC) {delete CoarsePC; CoarsePC=0;}
  if (CoarseMatrix) {delete CoarseMatrix; CoarseMatrix=0;}
  if (CoarseMat_ML) {ML_Operator_Destroy(&CoarseMat_ML);CoarseMat_ML=0;}
  if (CoarseMap_) {delete CoarseMap_; CoarseMap_=0;}
#ifdef HAVE_ML_IFPACK
  if (Smoother_){delete Smoother_; Smoother_=0;}
#endif
  return 0;
}/*end DestroyPreconditioner*/
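// Note: DestroyPreconditioner() is safe to call more than once, since every
// pointer is zeroed immediately after it is freed.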
// ======================================================================
// Computes C = <me> * A
int ML_Epetra::ML_RefMaxwell_11_Operator::MatrixMatrix_Multiply(const Epetra_CrsMatrix & A, Epetra_CrsMatrix **C) const
{
  ML_Comm* comm;
  ML_Comm_Create(&comm);
#ifdef ML_MPI
  // Get the MPI communicator, as it may not be MPI_COMM_WORLD, and update
  // the ML comm object accordingly.
  const Epetra_MpiComm *epcomm = dynamic_cast<const Epetra_MpiComm*>(&(A.Comm()));
  if (epcomm) ML_Comm_Set_UsrComm(comm,epcomm->Comm());
#endif
  ML_Operator *C_;
  int rv=MatrixMatrix_Multiply(A,comm,&C_);
  Epetra_CrsMatrix_Wrap_ML_Operator(C_,*Comm_,*DomainMap_,C,Copy,A.IndexBase());
  ML_Operator_Destroy(&C_);
  ML_Comm_Destroy(&comm);
  return rv;
}/*end MatrixMatrix_Multiply*/
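// Minimal usage sketch (hypothetical caller; the names 'Op11' and 'A' are
// assumed): the Copy-mode wrap above allocates *C, so the caller owns it.
//
//   Epetra_CrsMatrix *C = 0;
//   int err = Op11.MatrixMatrix_Multiply(A, &C);
//   if (!err) { /* ... use *C ... */ delete C; }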
// ======================================================================
int main(int argc, char *argv[])
{

#ifdef ML_MPI
  MPI_Init(&argc,&argv);
  // This next bit of code drops a middle rank out of the calculation. This tests
  // that the Hiptmair smoother does not hang in its apply.  Hiptmair creates
  // two separate ML objects, one for edge and one for nodes.  By default, the ML
  // objects use MPI_COMM_WORLD, whereas the matrix that Hiptmair is being applied to
  // may have an MPI subcommunicator.
  int commWorldSize;
  MPI_Comm_size(MPI_COMM_WORLD,&commWorldSize);
  std::vector<int> splitKey;
  int rankToDrop = 1;
  for (int i=0; i<commWorldSize; ++i)
    splitKey.push_back(0);
  splitKey[rankToDrop] = MPI_UNDEFINED; // exclude this rank from the subcommunicator
  int myrank;
  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
  MPI_Comm subcomm;
  MPI_Comm_split(MPI_COMM_WORLD, splitKey[myrank], myrank, &subcomm);
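  // Ranks that pass MPI_UNDEFINED as the color (here, rankToDrop) get back
  // subcomm == MPI_COMM_NULL from MPI_Comm_split.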
  if (myrank == rankToDrop) goto droppedRankLabel;
#endif

{ //scoping to avoid compiler error about goto jumping over initialization
#ifdef ML_MPI
  Epetra_MpiComm Comm(subcomm);
#else
  Epetra_SerialComm Comm;
#endif

  ML_Comm_Create(&mlcomm);

  char *datafile;

#ifdef CurlCurlAndMassAreSeparate
  if (argc != 5 && argc != 7) {
    if (Comm.MyPID() == 0) {
      std::cout << "usage: ml_maxwell.exe <S> <M> <T> <Kn> [edge map] [node map]"
           << std::endl;
      std::cout << "        S = edge stiffness matrix file" << std::endl;
      std::cout << "        M = edge mass matrix file" << std::endl;
      std::cout << "        T = discrete gradient file" << std::endl;
      std::cout << "       Kn = auxiliary nodal FE matrix file" << std::endl;
      std::cout << " edge map = edge distribution over processors" << std::endl;
      std::cout << " node map = node distribution over processors" << std::endl;
      std::cout << argc << std::endl;
    }
#else //ifdef CurlCurlAndMassAreSeparate
  if (argc != 4 && argc != 6) {
    if (Comm.MyPID() == 0) {
      std::cout << "usage: ml_maxwell.exe <A> <T> <Kn> [edge map] [node map]" <<std::endl;
      std::cout << "        A = edge element matrix file" << std::endl;
      std::cout << "        T = discrete gradient file" << std::endl;
      std::cout << "       Kn = auxiliary nodal FE matrix file" << std::endl;
      std::cout << " edge map = edge distribution over processors" << std::endl;
      std::cout << " node map = node distribution over processors" << std::endl;
      std::cout << argc << std::endl;
    }
#endif //ifdef CurlCurlAndMassAreSeparate
#ifdef ML_MPI
    MPI_Finalize();
#endif
    exit(1);
  }

  Epetra_Map *edgeMap, *nodeMap;
  Epetra_CrsMatrix *CCplusM=NULL, *CurlCurl=NULL, *Mass=NULL, *T=NULL, *Kn=NULL;

  // ================================================= //
  // READ IN MAPS FROM FILE                            //
  // ================================================= //
  // every processor reads this in
#ifdef CurlCurlAndMassAreSeparate
  if (argc > 5)
#else
  if (argc > 4)
#endif
  {
#ifdef CurlCurlAndMassAreSeparate
    datafile = argv[5];
#else
    datafile = argv[4]; // without a separate mass matrix, the map arguments shift left by one
#endif
    if (Comm.MyPID() == 0) {
      printf("Reading in edge map from %s ...\n",datafile);
      fflush(stdout);
    }
    EpetraExt::MatrixMarketFileToMap(datafile, Comm, edgeMap);
#ifdef CurlCurlAndMassAreSeparate
    datafile = argv[6];
#else
    datafile = argv[5];
#endif
    if (Comm.MyPID() == 0) {
      printf("Reading in node map from %s ...\n",datafile);
      fflush(stdout);
    }
    EpetraExt::MatrixMarketFileToMap(datafile, Comm, nodeMap);
  }
  else { // linear maps
         // Read the T matrix to determine the map sizes
         // and then construct linear maps

    if (Comm.MyPID() == 0)
      printf("Using linear edge and node maps ...\n");

    const int lineLength = 1025;
    char line[lineLength];
    FILE *handle;
    int M,N,NZ;
#ifdef CurlCurlAndMassAreSeparate
     handle = fopen(argv[3],"r");
#else
     handle = fopen(argv[2],"r");
#endif
    if (handle == 0) EPETRA_CHK_ERR(-1); // file not found
    // Strip off header lines (which start with "%")
    do {
      if (fgets(line, lineLength, handle) == 0) { // unexpected end of file
        fclose(handle);
        EPETRA_CHK_ERR(-1);
      }
    } while (line[0] == '%');
    // Get problem dimensions: M, N, NZ
    if (sscanf(line, "%d %d %d", &M, &N, &NZ) != 3) { // malformed size line
      fclose(handle);
      EPETRA_CHK_ERR(-1);
    }
    fclose(handle);
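    // Epetra_Map(NumGlobalElements, IndexBase, Comm) builds a linear map:
    // global IDs are distributed contiguously in roughly equal shares.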
    edgeMap = new Epetra_Map(M,0,Comm);
    nodeMap = new Epetra_Map(N,0,Comm);
  }

  // ===================================================== //
  // READ IN MATRICES FROM FILE                            //
  // ===================================================== //
#ifdef CurlCurlAndMassAreSeparate
  for (int i = 1; i < 5; i++) {
    datafile = argv[i];
    if (Comm.MyPID() == 0) {
      printf("reading %s ....\n",datafile); fflush(stdout);
    }
    switch (i) {
    case 1: //Curl
      MatrixMarketFileToCrsMatrix(datafile,*edgeMap,*edgeMap,*edgeMap,CurlCurl);
      break;
    case 2: //Mass
      MatrixMarketFileToCrsMatrix(datafile, *edgeMap, *edgeMap, *edgeMap, Mass);
      break;
    case 3: //Gradient
      MatrixMarketFileToCrsMatrix(datafile, *edgeMap, *edgeMap,*nodeMap, T);
      break;
    case 4: //Auxiliary nodal matrix
      MatrixMarketFileToCrsMatrix(datafile, *nodeMap,*nodeMap, *nodeMap, Kn);
      break;
    } //switch
  } //for (int i = 1; i < 5; i++)

#else
  for (int i = 1; i < 4; i++) {
    datafile = argv[i];
    if (Comm.MyPID() == 0) {
      printf("reading %s ....\n",datafile); fflush(stdout);
    }
    switch (i) {
    case 1: //Edge element matrix
      MatrixMarketFileToCrsMatrix(datafile,*edgeMap,*edgeMap,*edgeMap,CCplusM);
      break;
    case 2: //Gradient
      MatrixMarketFileToCrsMatrix(datafile, *edgeMap, *edgeMap, *nodeMap, T);
      break;
    case 3: //Auxiliary nodal matrix
      MatrixMarketFileToCrsMatrix(datafile, *nodeMap, *nodeMap, *nodeMap, Kn);
      break;
    } //switch
  } //for (int i = 1; i < 4; i++)
#endif //ifdef CurlCurlAndMassAreSeparate

  // ==================================================== //
  // S E T U P   O F    M L   P R E C O N D I T I O N E R //
  // ==================================================== //

  Teuchos::ParameterList MLList;
  int *options    = new int[AZ_OPTIONS_SIZE];
  double *params  = new double[AZ_PARAMS_SIZE];
  ML_Epetra::SetDefaults("maxwell", MLList, options, params);
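  // The Aztec options/params arrays are passed so SetDefaults() can record
  // Aztec-smoother defaults in them; both are freed in the clean-up phase.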

  MLList.set("ML output", 10);
  MLList.set("aggregation: type", "Uncoupled");
  MLList.set("coarse: max size", 15);
  MLList.set("aggregation: threshold", 0.0);
  //MLList.set("negative conductivity",true);
  //MLList.set("smoother: type", "Jacobi");
  MLList.set("subsmoother: type", "symmetric Gauss-Seidel");
  //MLList.set("max levels", 2);

  // coarse level solve
  MLList.set("coarse: type", "Amesos-KLU");
  //MLList.set("coarse: type", "Hiptmair");
  //MLList.set("coarse: type", "Jacobi");

  //MLList.set("dump matrix: enable", true);

#ifdef CurlCurlAndMassAreSeparate
  //Create the matrix of interest.
  CCplusM = Epetra_MatrixAdd(CurlCurl,Mass,1.0);
#endif

#ifdef CurlCurlAndMassAreSeparate
  ML_Epetra::MultiLevelPreconditioner * MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*CurlCurl, *Mass, *T, *Kn, MLList);
  // Comment out the line above and uncomment the next one if you have
  // mass and curl separately but want to precondition as if they are added
  // together.
/*
  ML_Epetra::MultiLevelPreconditioner * MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*CCplusM, *T, *Kn, MLList);
*/
#else
  ML_Epetra::MultiLevelPreconditioner * MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*CCplusM, *T, *Kn, MLList);
#endif //ifdef CurlCurlAndMassAreSeparate

  MLPrec->PrintUnused(0);

  // ========================================================= //
  // D E F I N I T I O N   O F   A Z T E C O O   P R O B L E M //
  // ========================================================= //

  // create left-hand side and right-hand side, and populate them with
  // data from file. Both vectors are defined on the domain map of the
  // edge matrix.
  // Epetra_Vectors can be created in View mode, to accept pointers to
  // double vectors.
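  // For illustration only (the vectors below copy-construct from the map):
  // a View-mode vector wraps user-owned storage without copying, e.g.
  //
  //   std::vector<double> vals(CCplusM->DomainMap().NumMyElements());
  //   Epetra_Vector xview(View, CCplusM->DomainMap(), &vals[0]);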

  if (Comm.MyPID() == 0)
    std::cout << "Putting in a zero initial guess and random rhs (in the range of S+M)" << std::endl;
  Epetra_Vector x(CCplusM->DomainMap());
  x.Random();
  Epetra_Vector rhs(CCplusM->DomainMap());
  CCplusM->Multiply(false,x,rhs);
  x.PutScalar(0.0);

  double vecnorm;
  rhs.Norm2(&vecnorm);
  if (Comm.MyPID() == 0) std::cout << "||rhs|| = " << vecnorm << std::endl;
  x.Norm2(&vecnorm);
  if (Comm.MyPID() == 0) std::cout << "||x|| = " << vecnorm << std::endl;

  // for AztecOO, we need an Epetra_LinearProblem
  Epetra_LinearProblem Problem(CCplusM,&x,&rhs);
  // AztecOO Linear problem
  AztecOO solver(Problem);
  // set MLPrec as preconditioning operator for AztecOO linear problem
  //std::cout << "no ml preconditioner!!!" << std::endl;
  solver.SetPrecOperator(MLPrec);
  //solver.SetAztecOption(AZ_precond, AZ_Jacobi);

  // a few options for AztecOO
  solver.SetAztecOption(AZ_solver, AZ_cg);
  solver.SetAztecOption(AZ_output, 1);

  solver.Iterate(15, 1e-3);
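  // Iterate() runs at most 15 CG iterations, stopping early if the scaled
  // residual (default AZ_r0 scaling) drops below 1e-3.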

  // =============== //
  // C L E A N   U P //
  // =============== //

  delete MLPrec;    // destroy phase prints out some information
  delete CurlCurl;
  delete CCplusM;
  delete Mass;
  delete T;
  delete Kn;
  delete nodeMap;
  delete edgeMap;
  delete [] params;
  delete [] options;

  ML_Comm_Destroy(&mlcomm);
} //avoids compiler error about jumping over initialization
  droppedRankLabel:
#ifdef ML_MPI
  MPI_Finalize();
#endif

  return 0;

} //main

#else

#include <stdlib.h>
#include <stdio.h>
#ifdef HAVE_MPI
#include "mpi.h"
#endif

int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  puts("Please configure ML with:");
#if !defined(HAVE_ML_EPETRA)
  puts("--enable-epetra");
#endif
#if !defined(HAVE_ML_TEUCHOS)
  puts("--enable-teuchos");
#endif
#if !defined(HAVE_ML_EPETRAEXT)
  puts("--enable-epetraext");
#endif
#if !defined(HAVE_ML_AZTECOO)
  puts("--enable-aztecoo");
#endif

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return 0;
}
#endif /* configuration guard (opened above this section) around the fallback main() */

// ======================================================================
int main(int argc, char *argv[])
{
  int    Nnodes=32*32;              /* Total number of nodes in the problem.*/
                                    /* 'Nnodes' must be a perfect square.   */

  struct user_partition Edge_Partition = {NULL, NULL,0,0,NULL,0,0,0},
                        Node_Partition = {NULL, NULL,0,0,NULL,0,0,0};

  int          proc_config[AZ_PROC_SIZE];

#ifdef ML_MPI
  MPI_Init(&argc,&argv);
#endif

  AZ_set_proc_config(proc_config, COMMUNICATOR);
  ML_Comm* comm;
  ML_Comm_Create(&comm);

  Node_Partition.Nglobal = Nnodes;
  Edge_Partition.Nglobal = Node_Partition.Nglobal*2;
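  // The example's grid (assumed regular, with Nnodes a perfect square)
  // carries two edges per node, hence twice as many global edges as nodes.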

  user_partition_nodes(&Node_Partition);
  user_partition_edges(&Edge_Partition, &Node_Partition);
  
  AZ_MATRIX * AZ_Ke = user_Ke_build(&Edge_Partition);
  AZ_MATRIX * AZ_Kn = user_Kn_build(&Node_Partition);

  // convert (put wrappers) from Aztec matrices to ML_Operator's

  ML_Operator * ML_Ke, * ML_Kn, * ML_Tmat;

  ML_Ke = ML_Operator_Create( comm );
  ML_Kn = ML_Operator_Create( comm );

  AZ_convert_aztec_matrix_2ml_matrix(AZ_Ke,ML_Ke,proc_config);
  AZ_convert_aztec_matrix_2ml_matrix(AZ_Kn,ML_Kn,proc_config);

  ML_Tmat = user_T_build(&Edge_Partition, &Node_Partition, ML_Kn, comm);

  Epetra_CrsMatrix * Epetra_Kn, * Epetra_Ke, * Epetra_T;
  
  int MaxNumNonzeros;
  double CPUTime;

  ML_Operator2EpetraCrsMatrix(ML_Ke, Epetra_Ke, MaxNumNonzeros, true, CPUTime);

  ML_Operator2EpetraCrsMatrix(ML_Kn, Epetra_Kn, MaxNumNonzeros, true, CPUTime);

  ML_Operator2EpetraCrsMatrix(ML_Tmat, Epetra_T, MaxNumNonzeros, true, CPUTime);

  Teuchos::ParameterList MLList;
  ML_Epetra::SetDefaults("maxwell", MLList);
  
  MLList.set("ML output", 0);

  MLList.set("aggregation: type", "Uncoupled");
  MLList.set("coarse: max size", 30);
  MLList.set("aggregation: threshold", 0.0);

  MLList.set("coarse: type", "Amesos-KLU");

  ML_Epetra::MultiLevelPreconditioner * MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*Epetra_Ke, *Epetra_T, *Epetra_Kn,
					    MLList);

  Epetra_Vector LHS(Epetra_Ke->DomainMap()); LHS.Random();
  Epetra_Vector RHS(Epetra_Ke->DomainMap()); RHS.PutScalar(1.0);
  
  Epetra_LinearProblem Problem(Epetra_Ke,&LHS,&RHS);
  AztecOO solver(Problem);
  solver.SetPrecOperator(MLPrec);

  solver.SetAztecOption(AZ_solver, AZ_cg_condnum);
  solver.SetAztecOption(AZ_output, 32);
  solver.Iterate(500, 1e-8);

  // ========================= //
  // compute the real residual //
  // ========================= //

  Epetra_Vector RHScomp(Epetra_Ke->DomainMap());
  int ierr;
  ierr = Epetra_Ke->Multiply(false, LHS, RHScomp);
  assert(ierr==0);

  Epetra_Vector resid(Epetra_Ke->DomainMap());

  ierr = resid.Update(1.0, RHS, -1.0, RHScomp, 0.0);
  assert(ierr==0);

  double residual;
  ierr = resid.Norm2(&residual);
  assert(ierr==0);
  if (proc_config[AZ_node] == 0) {
    std::cout << std::endl;
    std::cout << "==> Residual = " << residual << std::endl;
    std::cout << std::endl;
  }

  // =============== //
  // C L E A N   U P //
  // =============== //
  
  delete MLPrec;    // destroy phase prints out some information
  delete Epetra_Kn;
  delete Epetra_Ke;
  delete Epetra_T;
  
  ML_Operator_Destroy( &ML_Ke );
  ML_Operator_Destroy( &ML_Kn );
  ML_Comm_Destroy( &comm );

  if (Edge_Partition.my_local_ids != NULL) free(Edge_Partition.my_local_ids);
  if (Node_Partition.my_local_ids != NULL) free(Node_Partition.my_local_ids);
  if (Node_Partition.my_global_ids != NULL) free(Node_Partition.my_global_ids);
  if (Edge_Partition.my_global_ids != NULL) free(Edge_Partition.my_global_ids);
  if (Node_Partition.needed_external_ids != NULL) 
    free(Node_Partition.needed_external_ids);
  if (Edge_Partition.needed_external_ids != NULL) 
    free(Edge_Partition.needed_external_ids);

  if (AZ_Ke!= NULL) {
    AZ_free(AZ_Ke->bindx);
    AZ_free(AZ_Ke->val);
    AZ_free(AZ_Ke->data_org);
    AZ_matrix_destroy(&AZ_Ke);
  }
  if (AZ_Kn!= NULL) {
    AZ_free(AZ_Kn->bindx);
    AZ_free(AZ_Kn->val);
    AZ_free(AZ_Kn->data_org);
    AZ_matrix_destroy(&AZ_Kn);
  }

  ML_Operator_Destroy(&ML_Tmat);

  if (residual > 1e-5) {
    std::cout << "`MultiLevelPreconditioner_Maxwell.exe' failed!" << std::endl;
    exit(EXIT_FAILURE);
  }

#ifdef ML_MPI
  MPI_Finalize();
#endif

  if (proc_config[AZ_node] == 0)
    std::cout << "`MultiLevelPreconditioner_Maxwell.exe' passed!" << std::endl;
  exit(EXIT_SUCCESS);
}