Code example #1
// ================================================ ====== ==== ==== == = 
// Computes C= <me> * A
int ML_Epetra::ML_RefMaxwell_11_Operator::MatrixMatrix_Multiply(const Epetra_CrsMatrix & A, Epetra_CrsMatrix **C) const
{
  ML_Comm* comm;
  ML_Comm_Create(&comm);
#ifdef ML_MPI
  const Epetra_MpiComm *epcomm = dynamic_cast<const Epetra_MpiComm*>(&(A.Comm()));
  // Get the MPI communicator, as it may not be MPI_COMM_WORLD, and update the ML comm object
  if (epcomm) ML_Comm_Set_UsrComm(comm,epcomm->Comm());
#endif
  ML_Operator *C_;
  int rv=MatrixMatrix_Multiply(A,comm,&C_);
  Epetra_CrsMatrix_Wrap_ML_Operator(C_,*Comm_,*DomainMap_,C,Copy,A.IndexBase());
  ML_Operator_Destroy(&C_);
  ML_Comm_Destroy(&comm);
  return rv;
}/*end MatrixMatrix_Multiply*/
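A minimal usage sketch for the routine above. The operator instance Op and the assembled matrix A are assumptions of this sketch, and ownership of the returned product is presumed to lie with the caller.

// Hedged usage sketch: 'Op' (an ML_Epetra::ML_RefMaxwell_11_Operator) and the
// assembled Epetra_CrsMatrix 'A' are assumed to exist already.
Epetra_CrsMatrix *C = NULL;
int rv = Op.MatrixMatrix_Multiply(A, &C);   // computes C = Op * A
if (rv == 0 && C != NULL) {
  // ... use C ...
  delete C;   // the wrapped product is presumably owned by the caller
}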
Code example #2
/* Assign unknowns to processors */
void partition_edges(struct user_partition *Partition)
{
  int i, nx, Nloc;
  ML_Comm *comm;
  int *my_local_ids;

#ifdef AZTEC
  int    proc_config[AZ_PROC_SIZE];

  AZ_set_proc_config(proc_config, COMMUNICATOR);
  Partition->mypid = proc_config[AZ_node];
  Partition->nprocs= proc_config[AZ_N_procs];

  AZ_input_update(NULL,&(Partition->Nlocal),
		  &(Partition->my_global_ids),
		    proc_config,     Partition->Nglobal, 1, AZ_linear);
#else
  Partition->Nghost = 0;

  nx = (int) sqrt( ((double) Partition->Nglobal/2) + .00001);

  ML_Comm_Create( &comm);
  Partition->mypid = comm->ML_mypid;
  Partition->nprocs= comm->ML_nprocs;

  if (comm->ML_nprocs > 2) {
    if (comm->ML_mypid == 0)
      printf("This example only works for one or two processors\n");
    exit(1);
  }
  if ((comm->ML_nprocs == 2) && (Partition->Nglobal%2 == 1)) {
    if (comm->ML_mypid == 0)
      printf("Two processor case requires an even number of points.\n");
    exit(1);
  }


  if (comm->ML_nprocs == 1) {  /* 1 processor case is simple */
    Partition->Nlocal = Partition->Nglobal; 
    Partition->my_global_ids = (int *) malloc(sizeof(int)*Partition->Nlocal);
    for (i = 0; i < Partition->Nlocal; i++) 
      (Partition->my_global_ids)[i] = i;
    
    Partition->my_local_ids = (int *) malloc(sizeof(int)*Partition->Nglobal);
    for (i = 0; i < Partition->Nglobal; i++) 
      (Partition->my_local_ids)[i] = i;
  }
  else {

    /* allocate space */

    Partition->Nlocal = Partition->Nglobal/2; 
    Partition->my_global_ids = (int *) malloc(sizeof(int)*Partition->Nlocal);
    my_local_ids =  (int *) malloc(sizeof(int)*Partition->Nglobal);
    Partition->my_local_ids = my_local_ids;

    /* initialize local ids to '-1' (not owned by me) */
    for (i = 0; i < Partition->Nglobal; i++) 
      (Partition->my_local_ids)[i] = -1;

    /* set global ids */
    for (i = 0; i < Partition->Nlocal/2; i++) {
      (Partition->my_global_ids)[i] = i + comm->ML_mypid*Partition->Nlocal/2;
    }
    for (i = 0; i < Partition->Nlocal/2; i++) {
      (Partition->my_global_ids)[i + Partition->Nlocal/2] = 
	Partition->Nlocal+
	i + comm->ML_mypid*Partition->Nlocal/2;
    }

    /* set local ids of nonghost unknowns */

    for (i = 0; i < Partition->Nlocal; i++) 
      my_local_ids[(Partition->my_global_ids)[i]] = i;

    /* set the ghost unknowns */
    Partition->Nghost = 3*nx;

    Nloc = Partition->Nlocal;
    if (comm->ML_mypid == 0) {
      for (i = 0; i < nx; i++) {
	my_local_ids[Nloc/2 + i ] = Nloc + i;
	my_local_ids[Nloc - nx + i ] = Nloc + i + nx;
	my_local_ids[2*Nloc - nx + i ] = Nloc + i + 2*nx;
      }
    }
    else {
      for (i = 0; i < nx; i++) {
	my_local_ids[Nloc/2 - nx + i ] = Nloc + i;
	my_local_ids[              i ] = Nloc + i + nx;
	my_local_ids[3*Nloc/2 - nx + i ] = Nloc + i + 2*nx;
      }
    }

  }

#endif
}
Code example #3
/* Assign unknowns to processors */
void partition_nodes(struct user_partition *Partition)
{
  int i, nx, Nloc;
  ML_Comm *comm;
  int *my_local_ids;

  Partition->Nghost = 0;
#ifdef AZTEC
  int    proc_config[AZ_PROC_SIZE];

  AZ_set_proc_config(proc_config, COMMUNICATOR);

  AZ_input_update(NULL,&(Partition->Nlocal),
		  &(Partition->my_global_ids),
		    proc_config,     Partition->Nglobal, 1, AZ_linear);
  Partition->mypid = proc_config[AZ_node];
  Partition->nprocs= proc_config[AZ_N_procs];

#else

  nx = (int) sqrt( ((double) Partition->Nglobal) + .00001);

  ML_Comm_Create( &comm);
  Partition->mypid = comm->ML_mypid;
  Partition->nprocs= comm->ML_nprocs;
  Partition->Nghost = 2*nx;

  if (comm->ML_nprocs > 2) {
    if (comm->ML_mypid == 0)
      printf("This example only works for one or two processors\n");
    exit(1);
  }
  if ((comm->ML_nprocs == 2) && (Partition->Nglobal%2 == 1)) {
    if (comm->ML_mypid == 0)
      printf("Two processor case requires an even number of points.\n");
    exit(1);
  }
  if (comm->ML_nprocs == 1) {
    Partition->Nlocal = Partition->Nglobal; 
    Partition->my_global_ids = (int *) malloc(sizeof(int)*Partition->Nlocal);
    for (i = 0; i < Partition->Nlocal; i++) 
      (Partition->my_global_ids)[i] = i;
    
    Partition->my_local_ids = (int *) malloc(sizeof(int)*Partition->Nglobal);
    for (i = 0; i < Partition->Nglobal; i++) 
      (Partition->my_local_ids)[i] = i;
  }
  else {
    Partition->Nlocal = Partition->Nglobal/2; 
    Partition->my_global_ids = (int *) malloc(sizeof(int)*Partition->Nlocal);
    my_local_ids =  (int *) malloc(sizeof(int)*Partition->Nglobal);
    Partition->my_local_ids = my_local_ids;

    for (i = 0; i < Partition->Nglobal; i++) 
      (Partition->my_local_ids)[i] = -1;

    for (i = 0; i < Partition->Nlocal; i++) {
      (Partition->my_global_ids)[i] = i + comm->ML_mypid*Partition->Nlocal;
      my_local_ids[(Partition->my_global_ids)[i]] = i;
    }

    Nloc = Partition->Nlocal;
    if (comm->ML_mypid == 0) {
      for (i = 0; i < nx; i++) {
	my_local_ids[Nloc + i ] = Nloc + i;
	my_local_ids[2*Nloc - nx + i ] = Nloc + i + nx;
      }
    }
    else {
      for (i = 0; i < nx; i++) {
	my_local_ids[Nloc - nx + i ] = Nloc + i;
	my_local_ids[            i ] = Nloc + i + nx;
      }
    }

  }
#endif
}
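For reference, a minimal driver sketch for the two partition routines above; it mirrors the call site in code example #4 below (which additionally tags each partition with a type field), and the 16x16 problem size is taken from there.

/* Hedged driver sketch, following the call pattern of code example #4. */
struct user_partition Edge_Partition = {NULL, NULL, 0, 0};
struct user_partition Node_Partition = {NULL, NULL, 0, 0};

Node_Partition.Nglobal = 16*16;                      /* number of nodes, a perfect square  */
Edge_Partition.Nglobal = Node_Partition.Nglobal*2;   /* two edges per node on this 2D grid */

partition_edges(&Edge_Partition);
partition_nodes(&Node_Partition);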
Code example #4
int main(int argc, char *argv[])
{
  int    Nnodes=16*16;              /* Total number of nodes in the problem.*/
                                    /* 'Nnodes' must be a perfect square.   */
  int    MaxMgLevels=6;             /* Maximum number of Multigrid Levels   */
  int    Nits_per_presmooth=1;      /* # of pre & post smoothings per level */
  double tolerance = 1.0e-8;        /* At convergence:                      */
                                    /*   ||r_k||_2 < tolerance ||r_0||_2    */
  int smoothPe_flag = ML_YES;       /* ML_YES: smooth tentative prolongator */
                                    /* ML_NO: don't smooth prolongator      */

  /***************************************************************************/
  /* Select Hiptmair relaxation subsmoothers for the nodal and edge problems */
  /* Choices include                                                         */
  /*   1) ML_Gen_Smoother_SymGaussSeidel: this corresponds to a processor    */
  /*      local version of symmetric Gauss-Seidel/SOR. The number of sweeps  */
  /*      can be set via either 'edge_its' or 'nodal_its'. The damping can   */
  /*      be set via 'edge_omega' or 'nodal_omega'. When set to ML_DDEFAULT, */
  /*      the damping is set to '1' on one processor. On multiple processors */
  /*      a lower damping value is set. This is needed to converge processor */
  /*      local SOR.                                                         */
  /*   2) ML_Gen_Smoother_Cheby: this corresponds to polynomial relaxation.  */
  /*      The degree of the polynomial is set via 'edge_its' or 'nodal_its'. */
  /*      If the degree is '-1', Marian Brezina's MLS polynomial is chosen.  */
  /*      Otherwise, a Chebyshev polynomial is used over high frequencies    */
  /*      [ lambda_max/alpha , lambda_max]. Lambda_max is computed. 'alpha'  */
  /*      is hardwired in this example to correspond to twice the ratio of   */
  /*      unknowns in the fine and coarse meshes.                            */
  /*                                                                         */
  /* Using 'hiptmair_type' (see comments below) it is also possible to choose*/
  /* when edge and nodal problems are relaxed within the Hiptmair smoother.  */
  /***************************************************************************/

  void  *edge_smoother=(void *)     /* Edge relaxation:                     */
               ML_Gen_Smoother_Cheby; /*   ML_Gen_Smoother_Cheby            */
                                    /*     ML_Gen_Smoother_SymGaussSeidel   */
  void *nodal_smoother=(void *)     /* Nodal relaxation                     */
               ML_Gen_Smoother_Cheby;/*     ML_Gen_Smoother_Cheby           */
                                    /*     ML_Gen_Smoother_SymGaussSeidel   */

  int  edge_its = 3;                /* Iterations or polynomial degree for  */
  int  nodal_its = 3;               /* edge/nodal subsmoothers.             */
  double nodal_omega = ML_DDEFAULT, /* SOR damping parameter for nodal/edge */
         edge_omega  = ML_DDEFAULT; /* subsmoothers (see comments above).   */
  int   hiptmair_type=HALF_HIPTMAIR;/* FULL_HIPTMAIR: each invocation       */
                                    /*     smoothes on edges, then nodes,   */
                                    /*     and then once again on edges.    */
                                    /* HALF_HIPTMAIR: each pre-invocation   */
                                    /*     smoothes on edges, then nodes.   */
                                    /*     Each post-invocation smoothes    */
                                    /*     on nodes, then edges.            */


  ML_Operator  *Tmat, *Tmat_trans, **Tmat_array, **Tmat_trans_array;
  ML           *ml_edges, *ml_nodes;
  ML_Aggregate *ag;
  int          Nfine_edge, Ncoarse_edge, Nfine_node, Ncoarse_node, Nlevels;
  int          level, coarsest_level, itmp;
  double       edge_coarsening_rate, node_coarsening_rate, *rhs, *xxx;
  void         **edge_args, **nodal_args;
  struct       user_partition Edge_Partition = {NULL, NULL,0,0}, 
                                Node_Partition = {NULL, NULL,0,0};
  struct Tmat_data Tmat_data;
  int i, Ntotal;
  ML_Comm *comm;

  /* See Aztec User's Guide for information on these variables */

#ifdef AZTEC
  AZ_MATRIX    *Ke_mat, *Kn_mat;
  AZ_PRECOND   *Pmat = NULL;
  int          proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE];
  double       params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE];
#endif


  /* get processor information (proc id & # of procs) and set ML's printlevel. */

#ifdef ML_MPI
  MPI_Init(&argc,&argv);
#endif
#ifdef AZTEC
  AZ_set_proc_config(proc_config, COMMUNICATOR);
#endif
  ML_Set_PrintLevel(10);   /* set ML's output level: 0 gives least output */

  /* Set the # of global nodes/edges and partition both the edges and the */
  /* nodes over the processors. NOTE: I believe we assume that if an edge */
  /* is assigned to a processor, at least one of its nodes must also be   */
  /* assigned to that processor.                                          */

  Node_Partition.Nglobal = Nnodes;
  Edge_Partition.Nglobal = Node_Partition.Nglobal*2;
  Node_Partition.type = NODE;
  Edge_Partition.type = EDGE;
#define perxodic  /* note: 'periodic' itself is never defined, so the '#ifdef periodic' blocks below are compiled out */
#ifdef periodic
Node_Partition.Nglobal += 2; 
#endif
  partition_edges(&Edge_Partition);
  partition_nodes(&Node_Partition);
  xxx = (double *) ML_allocate((Edge_Partition.Nlocal+100)*sizeof(double));
  rhs = (double *) ML_allocate((Edge_Partition.Nlocal+100)*sizeof(double));
  for (i = 0; i < Edge_Partition.Nlocal + 100; i++) xxx[i] = -1.;
  for (i = 0; i < Edge_Partition.Nlocal; i++)
    xxx[i] = (double) Edge_Partition.my_global_ids[i];

  update_ghost_edges(xxx, (void *) &Edge_Partition);


  /* Create an empty multigrid hierarchy and set the 'MaxMGLevels-1'th   */
  /* level discretization within this hierarchy to the ML matrix         */
  /* representing Ke (Maxwell edge discretization).                      */

  ML_Create(&ml_edges, MaxMgLevels);
#ifdef AZTEC
  /* Build Ke as an Aztec matrix. Use built-in function AZ_ML_Set_Amat() */
  /* to convert to an ML matrix and put in hierarchy.                    */

  Ke_mat = user_Ke_build(&Edge_Partition);
  AZ_ML_Set_Amat(ml_edges, MaxMgLevels-1, Edge_Partition.Nlocal,
      		 Edge_Partition.Nlocal, Ke_mat, proc_config);
#else
  /* Build Ke directly as an ML matrix.                                  */

  ML_Init_Amatrix      (ml_edges, MaxMgLevels-1, Edge_Partition.Nlocal,
			Edge_Partition.Nlocal, &Edge_Partition);

  Ntotal = Edge_Partition.Nlocal;
  if (Edge_Partition.nprocs == 2) Ntotal += Edge_Partition.Nghost;
  ML_Set_Amatrix_Getrow(ml_edges, MaxMgLevels-1,  Ke_getrow, update_ghost_edges, Ntotal);
  ML_Set_Amatrix_Matvec(ml_edges, MaxMgLevels-1,  Ke_matvec);

#endif



  /* Build an Aztec matrix representing an auxiliary nodal PDE problem.  */
  /* This should be a variable coefficient Poisson problem (with unknowns*/
  /* at the nodes). The coefficients should be chosen to reflect the     */
  /* conductivity of the original edge problems.                         */
  /* Create an empty multigrid hierarchy. Convert the Aztec matrix to an */
  /* ML matrix and put it in the 'MaxMGLevels-1' level of the hierarchy. */
  /* Note it is possible to multiply T'*T to get this matrix, though this*/
  /* will not incorporate material properties.                           */
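  /* A hedged sketch of the T'*T alternative mentioned above (not used in    */
  /* this example): once Tmat and Tmat_trans have been built, something      */
  /* along the lines of                                                      */
  /*                                                                         */
  /*   ML_Operator *TtT = ML_Operator_Create(ml_nodes->comm);                */
  /*   ML_2matmult(Tmat_trans, Tmat, TtT, ML_CSR_MATRIX);                    */
  /*                                                                         */
  /* would give a nodal matrix, but without any material properties.         */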

  ML_Create(&ml_nodes, MaxMgLevels);

#ifdef AZTEC
  Kn_mat = user_Kn_build( &Node_Partition);
  AZ_ML_Set_Amat(ml_nodes, MaxMgLevels-1, Node_Partition.Nlocal, 
		 Node_Partition.Nlocal, Kn_mat, proc_config);
#else
  ML_Init_Amatrix      (ml_nodes, MaxMgLevels-1 , Node_Partition.Nlocal,
			Node_Partition.Nlocal, &Node_Partition);
  Ntotal = Node_Partition.Nlocal;
  if (Node_Partition.nprocs == 2) Ntotal += Node_Partition.Nghost;
  ML_Set_Amatrix_Getrow(ml_nodes, MaxMgLevels-1,  Kn_getrow, update_ghost_nodes, Ntotal);
#endif

  /* Build an ML matrix representing the null space of the PDE problem. */
  /* This should be a discrete gradient (nodes to edges).               */

#ifdef AZTEC
    Tmat = user_T_build (&Edge_Partition, &Node_Partition, 
  		   &(ml_nodes->Amat[MaxMgLevels-1]));
#else
    Tmat = ML_Operator_Create(ml_nodes->comm);
    Tmat_data.edge = &Edge_Partition;
    Tmat_data.node = &Node_Partition;
    Tmat_data.Kn   = &(ml_nodes->Amat[MaxMgLevels-1]);

    ML_Operator_Set_ApplyFuncData( Tmat,	Node_Partition.Nlocal,
				   Edge_Partition.Nlocal, ML_EMPTY, (void *) &Tmat_data, 
				   Edge_Partition.Nlocal, NULL, 0);
    ML_Operator_Set_Getrow( Tmat, ML_INTERNAL, Edge_Partition.Nlocal,Tmat_getrow);
    ML_Operator_Set_ApplyFunc(Tmat, ML_INTERNAL, Tmat_matvec);
  ML_Comm_Create( &comm);

  ML_CommInfoOP_Generate( &(Tmat->getrow->pre_comm), update_ghost_nodes, 
			  &Node_Partition,comm, Tmat->invec_leng, 
			  Node_Partition.Nghost);
#endif


  /********************************************************************/
  /* Set some ML parameters.                                          */
  /*------------------------------------------------------------------*/
	
  ML_Set_ResidualOutputFrequency(ml_edges, 1);
  ML_Set_Tolerance(ml_edges, 1.0e-8);
  ML_Aggregate_Create( &ag );
  ML_Aggregate_Set_CoarsenScheme_Uncoupled(ag);
  ML_Aggregate_Set_DampingFactor(ag, 0.0); /* must use 0 for maxwell */
  ML_Aggregate_Set_MaxCoarseSize(ag, 30);
  ML_Aggregate_Set_Threshold(ag, 0.0);


  /********************************************************************/
  /*                      Set up Tmat_trans                           */
  /*------------------------------------------------------------------*/

  Tmat_trans = ML_Operator_Create(ml_edges->comm);
  ML_Operator_Transpose_byrow(Tmat, Tmat_trans);


  Nlevels=ML_Gen_MGHierarchy_UsingReitzinger(ml_edges, &ml_nodes,MaxMgLevels-1,
					     ML_DECREASING,ag,Tmat,Tmat_trans, 
					     &Tmat_array,&Tmat_trans_array, 
					     smoothPe_flag, 1.5);

  /* Set the Hiptmair subsmoothers */

  if (nodal_smoother == (void *) ML_Gen_Smoother_SymGaussSeidel) {
    nodal_args = ML_Smoother_Arglist_Create(2);
    ML_Smoother_Arglist_Set(nodal_args, 0, &nodal_its);
    ML_Smoother_Arglist_Set(nodal_args, 1, &nodal_omega);
  }
  if (edge_smoother == (void *) ML_Gen_Smoother_SymGaussSeidel) {
    edge_args = ML_Smoother_Arglist_Create(2);
    ML_Smoother_Arglist_Set(edge_args, 0, &edge_its);
    ML_Smoother_Arglist_Set(edge_args, 1, &edge_omega);
  }
  if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) {
    nodal_args = ML_Smoother_Arglist_Create(2);
    ML_Smoother_Arglist_Set(nodal_args, 0, &nodal_its);
    Nfine_node = Tmat_array[MaxMgLevels-1]->invec_leng;
    Nfine_node = ML_gsum_int(Nfine_node, ml_edges->comm);
  }
  if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) {
    edge_args = ML_Smoother_Arglist_Create(2);
    ML_Smoother_Arglist_Set(edge_args, 0, &edge_its);
    Nfine_edge = Tmat_array[MaxMgLevels-1]->outvec_leng;
    Nfine_edge = ML_gsum_int(Nfine_edge, ml_edges->comm);
  }

  /****************************************************
  * Set up smoothers for all levels but the coarsest. *
  ****************************************************/
  coarsest_level = MaxMgLevels - Nlevels;

  for (level = MaxMgLevels-1; level > coarsest_level; level--)
    {
      if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) {
	Ncoarse_edge = Tmat_array[level-1]->outvec_leng;
	Ncoarse_edge = ML_gsum_int(Ncoarse_edge, ml_edges->comm);
	edge_coarsening_rate =  2.*((double) Nfine_edge)/ ((double) Ncoarse_edge);
	ML_Smoother_Arglist_Set(edge_args, 1, &edge_coarsening_rate);
	Nfine_edge = Ncoarse_edge;
      }
      if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) {
	Ncoarse_node = Tmat_array[level-1]->invec_leng;
	Ncoarse_node = ML_gsum_int(Ncoarse_node, ml_edges->comm);
	node_coarsening_rate =  2.*((double) Nfine_node)/ ((double) Ncoarse_node);
	ML_Smoother_Arglist_Set(nodal_args, 1, &node_coarsening_rate);
	Nfine_node = Ncoarse_node;
      }
      ML_Gen_Smoother_Hiptmair(ml_edges, level, ML_BOTH, Nits_per_presmooth,
			       Tmat_array, Tmat_trans_array, NULL, edge_smoother,
			       edge_args, nodal_smoother,nodal_args, hiptmair_type);
    }

  /*******************************************
  * Set up coarsest level smoother
  *******************************************/

  if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) {
    edge_coarsening_rate = (double) Nfine_edge;
    ML_Smoother_Arglist_Set(edge_args, 1, &edge_coarsening_rate);
  }
  if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) {
    node_coarsening_rate = (double) Nfine_node;
    ML_Smoother_Arglist_Set(nodal_args,1,&node_coarsening_rate);
  }
  ML_Gen_CoarseSolverSuperLU( ml_edges, coarsest_level);
  

  /* Must be called before invoking the preconditioner */
  ML_Gen_Solver(ml_edges, ML_MGV, MaxMgLevels-1, coarsest_level); 



  /* Set the initial guess and the right hand side. Invoke solver */	

  ML_free(xxx); ML_free(rhs);  /* release the ghost-update test vectors allocated earlier */
  xxx = (double *) ML_allocate(Edge_Partition.Nlocal*sizeof(double));
  ML_random_vec(xxx, Edge_Partition.Nlocal, ml_edges->comm);
  rhs = (double *) ML_allocate(Edge_Partition.Nlocal*sizeof(double)); 
  ML_random_vec(rhs, Edge_Partition.Nlocal, ml_edges->comm);

#ifdef AZTEC
  /* Choose the Aztec solver and criteria. Also tell Aztec that */
  /* ML will be supplying the preconditioner.                   */

  AZ_defaults(options, params);
  options[AZ_solver]   = AZ_fixed_pt;
  options[AZ_solver]   = AZ_gmres;
  options[AZ_kspace]   = 80;
  params[AZ_tol]       = tolerance;
  AZ_set_ML_preconditioner(&Pmat, Ke_mat, ml_edges, options); 
  options[AZ_conv] = AZ_noscaled;
  AZ_iterate(xxx, rhs, options, params, status, proc_config, Ke_mat, Pmat, NULL);
#else
  ML_Iterate(ml_edges, xxx, rhs);
#endif


  /* clean up. */

  ML_Smoother_Arglist_Delete(&nodal_args);
  ML_Smoother_Arglist_Delete(&edge_args);
  ML_Aggregate_Destroy(&ag);
  ML_Destroy(&ml_edges);
  ML_Destroy(&ml_nodes);
#ifdef AZTEC
  AZ_free((void *) Ke_mat->data_org);
  AZ_free((void *) Ke_mat->val);
  AZ_free((void *) Ke_mat->bindx);
  if (Ke_mat  != NULL) AZ_matrix_destroy(&Ke_mat);
  if (Pmat  != NULL) AZ_precond_destroy(&Pmat);
  if (Kn_mat != NULL) AZ_matrix_destroy(&Kn_mat);
#endif
  free(xxx);
  free(rhs);
  ML_Operator_Destroy(&Tmat);
  ML_Operator_Destroy(&Tmat_trans);
  ML_MGHierarchy_ReitzingerDestroy(MaxMgLevels-2, &Tmat_array, &Tmat_trans_array);

#ifdef ML_MPI
  MPI_Finalize();
#endif
		
  return 0;
		
}
Code example #5
ML_Operator *user_T_build(struct user_partition *Edge_Partition,
		      struct user_partition *Node_Partition,
		      ML_Operator *Kn_mat)
{

  int nx, i, ii, jj, horv, Ncols, Nexterns;
  int *Tmat_bindx;
  double *Tmat_val;
  ML_Operator *Tmat;
  struct ML_CSR_MSRdata *csr_data;
  ML_Comm *comm;
  struct aztec_context *aztec_context;
  int global_id;
  int Nlocal_nodes, Nlocal_edges; 
  int nz_ptr;

  Nlocal_nodes = Node_Partition->Nlocal;
  Nlocal_edges = Edge_Partition->Nlocal;

  nx = (int) sqrt( ((double) Node_Partition->Nglobal) + .00001);
#ifdef periodic
  nx = (int) sqrt( ((double) Node_Partition->Nglobal - 2) + .00001); 
#endif

  Tmat_bindx = (int    *) malloc((6*Nlocal_edges+5)*sizeof(int));
  Tmat_val   = (double *) malloc((6*Nlocal_edges+5)*sizeof(double));

  Tmat_bindx[0] = Nlocal_edges + 1;
  for (i = 0; i < Nlocal_edges; i++) {
    global_id = (Edge_Partition->my_global_ids)[i];
    Tmat_bindx[i+1] = Tmat_bindx[i] + 2;
#ifdef periodic
Tmat_bindx[i+1] += 1; 
#endif
    Tmat_val[i] = 0.0;

    inv2dindex(global_id, &ii, &jj, nx, &horv);
    nz_ptr = Tmat_bindx[i];
    if (horv == HORIZONTAL) {
      Tmat_bindx[nz_ptr] = southwest2d(ii,jj,nx);  Tmat_val[nz_ptr++] = -1.;
      Tmat_bindx[nz_ptr] = southeast2d(ii,jj,nx);  Tmat_val[nz_ptr++] = 1.;
#ifdef periodic
      Tmat_bindx[nz_ptr] = Nlocal_nodes-2;         Tmat_val[nz_ptr++] = 1.;
#endif
    }
    else {
      Tmat_bindx[nz_ptr] = northwest2d(ii,jj,nx);  Tmat_val[nz_ptr++] = -1.;
      Tmat_bindx[nz_ptr] = southwest2d(ii,jj,nx);  Tmat_val[nz_ptr++] =  1.;
#ifdef periodic
      Tmat_bindx[nz_ptr] = Nlocal_nodes - 1;       Tmat_val[nz_ptr++] = 1.;
#endif
    }
  }

  /* Convert the MSR matrix to a CSR matrix. Then use a modified Aztec */
  /* routine to convert the global CSR matrix to a local ML matrix     */
  /* Since this routine does not compute the communication structure,  */
  /* we assume that it is identical to Kn's and just clone it.         */

  csr_data = (struct ML_CSR_MSRdata *) ML_allocate(sizeof(struct ML_CSR_MSRdata));
  csr_data->columns = Tmat_bindx;
  csr_data->values  = Tmat_val;
  ML_MSR2CSR(csr_data, Nlocal_edges, &Ncols);
  aztec_context = (struct aztec_context *) Kn_mat->data;
  Nexterns = (aztec_context->Amat->data_org)[AZ_N_external];

  Nexterns = 0;

  ML_Comm_Create( &comm);

  AZ_Tmat_transform2ml(Nexterns,   Node_Partition->needed_external_ids,
		       reordered_node_externs,
		       Tmat_bindx, Tmat_val, csr_data->rowptr, Nlocal_nodes,
		       Node_Partition->my_global_ids,
		       comm, Nlocal_edges, &Tmat);
  ML_free(csr_data);
  Tmat->data_destroy = ML_CSR_MSRdata_Destroy;
  ML_CommInfoOP_Clone(&(Tmat->getrow->pre_comm), Kn_mat->getrow->pre_comm);

  return(Tmat);
}
Code example #6
// ================================================ ====== ==== ==== == =
// Computes the preconditioner
int ML_Epetra::FaceMatrixFreePreconditioner::ComputePreconditioner(const bool CheckFiltering)
{
  Teuchos::ParameterList & ListCoarse=List_.sublist("face matrix free: coarse");

  /* ML Communicator */
  ML_Comm_Create(&ml_comm_);

  /* Parameter List Options */
  int SmootherSweeps = List_.get("smoother: sweeps", 0);
  MaxLevels = List_.get("max levels",10);
  print_hierarchy= List_.get("print hierarchy",false);
  num_cycles  = List_.get("cycle applications",1);

  /* Sanity Checking*/
  int OperatorDomainPoints =  OperatorDomainMap().NumGlobalPoints();
  int OperatorRangePoints =  OperatorRangeMap().NumGlobalPoints();
  if (OperatorDomainPoints != OperatorRangePoints)
    ML_CHK_ERR(-1); // only square matrices

  /* Output Header */
  if(verbose_ && !Comm_->MyPID()) {
    printf("------------------------------------------------------------------------------\n");
    printf("***\n");
    printf("*** ML_Epetra::FaceMatrixFreePreconditioner [%s]\n",Label());
    printf("***\n");
  }

  /* Invert non-zeros on the diagonal */
  if(SmootherSweeps){
    for (int i = 0; i < InvDiagonal_->MyLength(); ++i)
      if ((*InvDiagonal_)[i] != 0.0)
        (*InvDiagonal_)[i] = 1.0 / (*InvDiagonal_)[i];
    double nrm;
    InvDiagonal_->Norm2(&nrm);
    if(verbose_ && !Comm_->MyPID()) printf("Inverse Diagonal Norm = %6.4e\n",nrm);
  }/*end if*/

  /* Do the eigenvalue estimation for Chebyshev */
  if(SmootherSweeps)
    ML_CHK_ERR(SetupSmoother());


  if(MaxLevels > 0) {
    /* Build the prolongator */
    ML_CHK_ERR(BuildProlongator());

    /* DEBUG: Output matrices */
    if(print_hierarchy) EpetraExt::RowMatrixToMatlabFile("prolongator.dat",*Prolongator_);

    /* Form the coarse matrix */
    ML_CHK_ERR(FormCoarseMatrix());

    /* DEBUG: Output matrices */
    if(print_hierarchy) EpetraExt::RowMatrixToMatlabFile("coarsemat.dat",*CoarseMatrix);

    /* Setup Preconditioner on Coarse Matrix */
    CoarsePC = new MultiLevelPreconditioner(*CoarseMatrix,ListCoarse);
    if(!CoarsePC) ML_CHK_ERR(-2);

    /* Clean Up */
    //delete nullspace;
  }/*end if*/

  return 0;
}/*end ComputePreconditioner*/
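The options read at the top of ComputePreconditioner come from the List_ member held by the preconditioner. Below is a hedged sketch of a caller-side parameter list using the key names seen above; the chosen values, and how the list reaches the class, are illustrative rather than taken from the source.

#include "Teuchos_ParameterList.hpp"

// Hedged sketch: the key names match those queried in ComputePreconditioner above;
// the values are illustrative only.
Teuchos::ParameterList List;
List.set("smoother: sweeps", 2);        // 0 would skip the smoother setup entirely
List.set("max levels", 10);
List.set("print hierarchy", false);
List.set("cycle applications", 1);
// Options forwarded to the coarse-grid MultiLevelPreconditioner:
List.sublist("face matrix free: coarse").set("coarse: type", "Amesos-KLU");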
Code example #7
int main(int argc, char *argv[])
{

#ifdef ML_MPI
  MPI_Init(&argc,&argv);
  // This next bit of code drops a middle rank out of the calculation. This tests
  // that the Hiptmair smoother does not hang in its apply.  Hiptmair creates
  // two separate ML objects, one for edge and one for nodes.  By default, the ML
  // objects use MPI_COMM_WORLD, whereas the matrix that Hiptmair is being applied to
  // may have an MPI subcommunicator.
  int commWorldSize;
  MPI_Comm_size(MPI_COMM_WORLD,&commWorldSize);
  std::vector<int> splitKey;
  int rankToDrop = 1;
  for (int i=0; i<commWorldSize; ++i)
    splitKey.push_back(0);
  splitKey[rankToDrop] = MPI_UNDEFINED; //this rank will be dropped from the subcommunicator
  int myrank;
  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
  MPI_Comm subcomm;
  MPI_Comm_split(MPI_COMM_WORLD, splitKey[myrank], myrank, &subcomm);
  if (myrank == rankToDrop) goto droppedRankLabel;
#endif

{ //scoping to avoid compiler error about goto jumping over initialization
#ifdef ML_MPI
  Epetra_MpiComm Comm(subcomm);
#else
  Epetra_SerialComm Comm;
#endif

  ML_Comm_Create(&mlcomm);

  char *datafile;

#ifdef CurlCurlAndMassAreSeparate
  if (argc != 5 && argc != 7) {
    if (Comm.MyPID() == 0) {
      std::cout << "usage: ml_maxwell.exe <S> <M> <T> <Kn> [edge map] [node map]"
           << std::endl;
      std::cout << "        S = edge stiffness matrix file" << std::endl;
      std::cout << "        M = edge mass matrix file" << std::endl;
      std::cout << "        T = discrete gradient file" << std::endl;
      std::cout << "       Kn = auxiliary nodal FE matrix file" << std::endl;
      std::cout << " edge map = edge distribution over processors" << std::endl;
      std::cout << " node map = node distribution over processors" << std::endl;
      std::cout << argc << std::endl;
    }
#else //ifdef CurlCurlAndMassAreSeparate
  if (argc != 4 && argc != 6) {
    if (Comm.MyPID() == 0) {
      std::cout << "usage: ml_maxwell.exe <A> <T> <Kn> [edge map] [node map]" <<std::endl;
      std::cout << "        A = edge element matrix file" << std::endl;
      std::cout << "        T = discrete gradient file" << std::endl;
      std::cout << "       Kn = auxiliary nodal FE matrix file" << std::endl;
      std::cout << " edge map = edge distribution over processors" << std::endl;
      std::cout << " node map = node distribution over processors" << std::endl;
      std::cout << argc << std::endl;
    }
#endif //ifdef CurlCurlAndMassAreSeparate
#ifdef ML_MPI
    MPI_Finalize();
#endif
    exit(1);
  }

  Epetra_Map *edgeMap, *nodeMap;
  Epetra_CrsMatrix *CCplusM=NULL, *CurlCurl=NULL, *Mass=NULL, *T=NULL, *Kn=NULL;

  // ================================================= //
  // READ IN MAPS FROM FILE                            //
  // ================================================= //
  // every processor reads this in
#ifdef CurlCurlAndMassAreSeparate
  if (argc > 5)
#else
  if (argc > 4)
#endif
  {
#ifdef CurlCurlAndMassAreSeparate
    datafile = argv[5];
#else
    datafile = argv[4];
#endif
    if (Comm.MyPID() == 0) {
      printf("Reading in edge map from %s ...\n",datafile);
      fflush(stdout);
    }
    EpetraExt::MatrixMarketFileToMap(datafile, Comm, edgeMap);
#ifdef CurlCurlAndMassAreSeparate
    datafile = argv[6];
#else
    datafile = argv[5];
#endif
    if (Comm.MyPID() == 0) {
      printf("Reading in node map from %s ...\n",datafile);
      fflush(stdout);
    }
    EpetraExt::MatrixMarketFileToMap(datafile, Comm, nodeMap);
  }
  else { // linear maps
         // Read the T matrix to determine the map sizes
         // and then construct linear maps

    if (Comm.MyPID() == 0)
      printf("Using linear edge and node maps ...\n");

    const int lineLength = 1025;
    char line[lineLength];
    FILE *handle;
    int M,N,NZ;
#ifdef CurlCurlAndMassAreSeparate
     handle = fopen(argv[3],"r");
#else
     handle = fopen(argv[2],"r");
#endif
    if (handle == 0) EPETRA_CHK_ERR(-1); // file not found
    // Strip off header lines (which start with "%")
    do {
       if(fgets(line, lineLength, handle)==0) {if (handle!=0) fclose(handle);}
    } while (line[0] == '%');
    // Get problem dimensions: M, N, NZ
    if(sscanf(line,"%d %d %d", &M, &N, &NZ)==0) {if (handle!=0) fclose(handle);}
    fclose(handle);
    edgeMap = new Epetra_Map(M,0,Comm);
    nodeMap = new Epetra_Map(N,0,Comm);
  }

  // ===================================================== //
  // READ IN MATRICES FROM FILE                            //
  // ===================================================== //
#ifdef CurlCurlAndMassAreSeparate
  for (int i = 1; i <5; i++) {
    datafile = argv[i];
    if (Comm.MyPID() == 0) {
      printf("reading %s ....\n",datafile); fflush(stdout);
    }
    switch (i) {
    case 1: //Curl
      MatrixMarketFileToCrsMatrix(datafile,*edgeMap,*edgeMap,*edgeMap,CurlCurl);
      break;
    case 2: //Mass
      MatrixMarketFileToCrsMatrix(datafile, *edgeMap, *edgeMap, *edgeMap, Mass);
      break;
    case 3: //Gradient
      MatrixMarketFileToCrsMatrix(datafile, *edgeMap, *edgeMap,*nodeMap, T);
      break;
    case 4: //Auxiliary nodal matrix
      MatrixMarketFileToCrsMatrix(datafile, *nodeMap,*nodeMap, *nodeMap, Kn);
      break;
    } //switch
  } //for (int i = 1; i <5; i++)

#else
  for (int i = 1; i <4; i++) {
    datafile = argv[i];
    if (Comm.MyPID() == 0) {
      printf("reading %s ....\n",datafile); fflush(stdout);
    }
    switch (i) {
    case 1: //Edge element matrix
      MatrixMarketFileToCrsMatrix(datafile,*edgeMap,*edgeMap,*edgeMap,CCplusM);
      break;
    case 2: //Gradient
      MatrixMarketFileToCrsMatrix(datafile, *edgeMap, *edgeMap, *nodeMap, T);
      break;
    case 3: //Auxiliary nodal matrix
      MatrixMarketFileToCrsMatrix(datafile, *nodeMap, *nodeMap, *nodeMap, Kn);
      break;
    } //switch
  } //for (int i = 1; i <4; i++)
#endif //ifdef CurlCurlAndMassAreSeparate

  // ==================================================== //
  // S E T U P   O F    M L   P R E C O N D I T I O N E R //
  // ==================================================== //

  Teuchos::ParameterList MLList;
  int *options    = new int[AZ_OPTIONS_SIZE];
  double *params  = new double[AZ_PARAMS_SIZE];
  ML_Epetra::SetDefaults("maxwell", MLList, options, params);

  MLList.set("ML output", 10);
  MLList.set("aggregation: type", "Uncoupled");
  MLList.set("coarse: max size", 15);
  MLList.set("aggregation: threshold", 0.0);
  //MLList.set("negative conductivity",true);
  //MLList.set("smoother: type", "Jacobi");
  MLList.set("subsmoother: type", "symmetric Gauss-Seidel");
  //MLList.set("max levels", 2);

  // coarse level solve
  MLList.set("coarse: type", "Amesos-KLU");
  //MLList.set("coarse: type", "Hiptmair");
  //MLList.set("coarse: type", "Jacobi");

  //MLList.set("dump matrix: enable", true);

#ifdef CurlCurlAndMassAreSeparate
  //Create the matrix of interest.
  CCplusM = Epetra_MatrixAdd(CurlCurl,Mass,1.0);
#endif

#ifdef CurlCurlAndMassAreSeparate
  ML_Epetra::MultiLevelPreconditioner * MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*CurlCurl, *Mass, *T, *Kn, MLList);
  // Comment out the line above and uncomment the next one if you have
  // mass and curl separately but want to precondition as if they are added
  // together.
/*
  ML_Epetra::MultiLevelPreconditioner * MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*CCplusM, *T, *Kn, MLList);
*/
#else
  ML_Epetra::MultiLevelPreconditioner * MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*CCplusM, *T, *Kn, MLList);
#endif //ifdef CurlCurlAndMassAreSeparate

  MLPrec->PrintUnused(0);

  // ========================================================= //
  // D E F I N I T I O N   O F   A Z T E C O O   P R O B L E M //
  // ========================================================= //

  // create left-hand side and right-hand side, and populate them with
  // data from file. Both vectors are defined on the domain map of the
  // edge matrix.
  // Epetra_Vectors can be created in View mode, to accept pointers to
  // double vectors.
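  // A hedged illustration of the View-mode construction mentioned above (it is
  // not used below); 'myValues' would be a caller-owned array of length
  // CCplusM->DomainMap().NumMyElements():
  //
  //   Epetra_Vector xFromArray(View, CCplusM->DomainMap(), myValues);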

  if (Comm.MyPID() == 0)
    std::cout << "Putting in a zero initial guess and random rhs (in the range of S+M)" << std::endl;
  Epetra_Vector x(CCplusM->DomainMap());
  x.Random();
  Epetra_Vector rhs(CCplusM->DomainMap());
  CCplusM->Multiply(false,x,rhs);
  x.PutScalar(0.0);

  double vecnorm;
  rhs.Norm2(&vecnorm);
  if (Comm.MyPID() == 0) std::cout << "||rhs|| = " << vecnorm << std::endl;
  x.Norm2(&vecnorm);
  if (Comm.MyPID() == 0) std::cout << "||x|| = " << vecnorm << std::endl;

  // for AztecOO, we need an Epetra_LinearProblem
  Epetra_LinearProblem Problem(CCplusM,&x,&rhs);
  // AztecOO Linear problem
  AztecOO solver(Problem);
  // set MLPrec as precondititoning operator for AztecOO linear problem
  //std::cout << "no ml preconditioner!!!" << std::endl;
  solver.SetPrecOperator(MLPrec);
  //solver.SetAztecOption(AZ_precond, AZ_Jacobi);

  // a few options for AztecOO
  solver.SetAztecOption(AZ_solver, AZ_cg);
  solver.SetAztecOption(AZ_output, 1);

  solver.Iterate(15, 1e-3);

  // =============== //
  // C L E A N   U P //
  // =============== //

  delete MLPrec;    // destroy phase prints out some information
  delete CurlCurl;
  delete CCplusM;
  delete Mass;
  delete T;
  delete Kn;
  delete nodeMap;
  delete edgeMap;
  delete [] params;
  delete [] options;

  ML_Comm_Destroy(&mlcomm);
} //avoids compiler error about jumping over initialization
  droppedRankLabel:
#ifdef ML_MPI
  MPI_Finalize();
#endif

  return 0;

} //main

#else

#include <stdlib.h>
#include <stdio.h>
#ifdef HAVE_MPI
#include "mpi.h"
#endif

int main(int argc, char *argv[])
{
#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
#endif

  puts("Please configure ML with:");
#if !defined(HAVE_ML_EPETRA)
  puts("--enable-epetra");
#endif
#if !defined(HAVE_ML_TEUCHOS)
  puts("--enable-teuchos");
#endif
#if !defined(HAVE_ML_EPETRAEXT)
  puts("--enable-epetraext");
#endif
#if !defined(HAVE_ML_AZTECOO)
  puts("--enable-aztecoo");
#endif

#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return 0;
}
Code example #8
// ====================================================================== 
void Init() 
{
  if (ML_Comm_ == 0) ML_Comm_Create(&ML_Comm_);
  if (Epetra_Comm_ == 0) {
#ifdef HAVE_MPI
    Epetra_Comm_ = new Epetra_MpiComm(MPI_COMM_WORLD);
#else
    Epetra_Comm_ = new Epetra_SerialComm;
#endif
  }

  char * str = (char *) getenv("ML_BREAK_FOR_DEBUGGER");
  int i = 0, j = 0;
  char buf[80];
  char go = ' ';
  char hostname[80];
  if (str != NULL) i++;

  FILE * ML_capture_flag;
  ML_capture_flag = fopen("ML_debug_now","r");
  if(ML_capture_flag) {
    i++;
    fclose(ML_capture_flag);
  }

  GetEpetra_Comm().SumAll(&i, &j, 1);

  if (j != 0)
  {
    if (GetMyPID()  == 0) std::cout << "Host and Process Ids for tasks" << std::endl;
    for (i = 0; i < GetNumProcs() ; i++) {
      if (i == GetMyPID() ) {
#if defined(TFLOP) || defined(JANUS_STLPORT) || defined(COUGAR)
        sprintf(buf, "Host: %s   PID: %d", "janus", getpid());
#else
        gethostname(hostname, sizeof(hostname));
        sprintf(buf, "Host: %s\tMyPID(): %d\tPID: %d",
                hostname, GetMyPID(), getpid());
#endif
        printf("%s\n",buf);
        fflush(stdout);
#ifdef ICL
        Sleep(1);
#else
        sleep(1);
#endif
      }
    }
    if (GetMyPID() == 0) {
      printf("\n");
      printf("** Pausing because environment variable ML_BREAK_FOR_DEBUGGER has been set,\n");
      puts("** or file ML_debug_now has been created");
      printf("**\n");
      printf("** You may now attach debugger to the processes listed above.\n");
      printf( "**\n");
      printf( "** Enter a character to continue > "); fflush(stdout);
      scanf("%c",&go);
    }
  }

  ML_Set_PrintLevel(10);
}
Code example #9
int main(int argc, char *argv[])
{
  int    Nnodes=32*32;              /* Total number of nodes in the problem.*/
                                    /* 'Nnodes' must be a perfect square.   */

  struct       user_partition Edge_Partition = {NULL, NULL,0,0,NULL,0,0,0}, 
                                Node_Partition = {NULL, NULL,0,0,NULL,0,0,0};

  int          proc_config[AZ_PROC_SIZE];

#ifdef ML_MPI
  MPI_Init(&argc,&argv);
#endif

  AZ_set_proc_config(proc_config, COMMUNICATOR);
  ML_Comm* comm;
  ML_Comm_Create(&comm);

  Node_Partition.Nglobal = Nnodes;
  Edge_Partition.Nglobal = Node_Partition.Nglobal*2;

  user_partition_nodes(&Node_Partition);
  user_partition_edges(&Edge_Partition, &Node_Partition);
  
  AZ_MATRIX * AZ_Ke = user_Ke_build(&Edge_Partition);
  AZ_MATRIX * AZ_Kn = user_Kn_build(&Node_Partition);

  // convert (put wrappers) from Aztec matrices to ML_Operator's

  ML_Operator * ML_Ke, * ML_Kn, * ML_Tmat;

  ML_Ke = ML_Operator_Create( comm );
  ML_Kn = ML_Operator_Create( comm );

  AZ_convert_aztec_matrix_2ml_matrix(AZ_Ke,ML_Ke,proc_config);
  AZ_convert_aztec_matrix_2ml_matrix(AZ_Kn,ML_Kn,proc_config);

  ML_Tmat = user_T_build(&Edge_Partition, &Node_Partition, 
		      ML_Kn, comm);

  Epetra_CrsMatrix * Epetra_Kn, * Epetra_Ke, * Epetra_T;
  
  int MaxNumNonzeros;
  double CPUTime;

  ML_Operator2EpetraCrsMatrix(ML_Ke,Epetra_Ke,
			      MaxNumNonzeros,
			      true,CPUTime);

  ML_Operator2EpetraCrsMatrix(ML_Kn,
			      Epetra_Kn,MaxNumNonzeros,
			      true,CPUTime);

  ML_Operator2EpetraCrsMatrix(ML_Tmat,Epetra_T,MaxNumNonzeros,
			      true,CPUTime);  

  Teuchos::ParameterList MLList;
  ML_Epetra::SetDefaults("maxwell", MLList);
  
  MLList.set("ML output", 0);

  MLList.set("aggregation: type", "Uncoupled");
  MLList.set("coarse: max size", 30);
  MLList.set("aggregation: threshold", 0.0);

  MLList.set("coarse: type", "Amesos-KLU");

  ML_Epetra::MultiLevelPreconditioner * MLPrec =
    new ML_Epetra::MultiLevelPreconditioner(*Epetra_Ke, *Epetra_T, *Epetra_Kn,
					    MLList);

  Epetra_Vector LHS(Epetra_Ke->DomainMap()); LHS.Random();
  Epetra_Vector RHS(Epetra_Ke->DomainMap()); RHS.PutScalar(1.0);
  
  Epetra_LinearProblem Problem(Epetra_Ke,&LHS,&RHS);
  AztecOO solver(Problem);
  solver.SetPrecOperator(MLPrec);

  solver.SetAztecOption(AZ_solver, AZ_cg_condnum);
  solver.SetAztecOption(AZ_output, 32);
  solver.Iterate(500, 1e-8);

  // ========================= //
  // compute the real residual //
  // ========================= //

  Epetra_Vector RHScomp(Epetra_Ke->DomainMap());
  int ierr;
  ierr = Epetra_Ke->Multiply(false, LHS, RHScomp);
  assert(ierr==0);

  Epetra_Vector resid(Epetra_Ke->DomainMap());

  ierr = resid.Update(1.0, RHS, -1.0, RHScomp, 0.0);
  assert(ierr==0);

  double residual;
  ierr = resid.Norm2(&residual);
  assert(ierr==0);
  if (proc_config[AZ_node] == 0) {
    std::cout << std::endl;
    std::cout << "==> Residual = " << residual << std::endl;
    std::cout << std::endl;
  }

  // =============== //
  // C L E A N   U P //
  // =============== //
  
  delete MLPrec;    // destroy phase prints out some information
  delete Epetra_Kn;
  delete Epetra_Ke;
  delete Epetra_T;
  
  ML_Operator_Destroy( &ML_Ke );
  ML_Operator_Destroy( &ML_Kn );
  ML_Comm_Destroy( &comm );

  if (Edge_Partition.my_local_ids != NULL) free(Edge_Partition.my_local_ids);
  if (Node_Partition.my_local_ids != NULL) free(Node_Partition.my_local_ids);
  if (Node_Partition.my_global_ids != NULL) free(Node_Partition.my_global_ids);
  if (Edge_Partition.my_global_ids != NULL) free(Edge_Partition.my_global_ids);
  if (Node_Partition.needed_external_ids != NULL) 
    free(Node_Partition.needed_external_ids);
  if (Edge_Partition.needed_external_ids != NULL) 
    free(Edge_Partition.needed_external_ids);

  if (AZ_Ke!= NULL) {
    AZ_free(AZ_Ke->bindx);
    AZ_free(AZ_Ke->val);
    AZ_free(AZ_Ke->data_org);
    AZ_matrix_destroy(&AZ_Ke);
  }
  if (AZ_Kn!= NULL) {
    AZ_free(AZ_Kn->bindx);
    AZ_free(AZ_Kn->val);
    AZ_free(AZ_Kn->data_org);
    AZ_matrix_destroy(&AZ_Kn);
  }

  ML_Operator_Destroy(&ML_Tmat);

  if (residual > 1e-5) {
    std::cout << "`MultiLevelPreconditioner_Maxwell.exe' failed!" << std::endl;
    exit(EXIT_FAILURE);
  }

#ifdef ML_MPI
  MPI_Finalize();
#endif
		
  if (proc_config[AZ_node] == 0)
    std::cout << "`MultiLevelPreconditioner_Maxwell.exe' passed!" << std::endl;
  exit(EXIT_SUCCESS);
		
}