Example #1
void sample2(struct data *Afine_data, struct data *Acoarse_data,
	     struct data  *Rmat_data, struct data    *Pmat_data,
	     double *sol, double *rhs)
{
   ML *my_ml;
   struct data fsmooth, csmooth;
   int fine_grid, output_level = 10, N_grids = 2, grid0 = 0, grid1 = 1;
   int Nfine, Ncoarse;

   Nfine   = Rmat_data->from_size;
   Ncoarse = Rmat_data->to_size;

   fsmooth.size           = Nfine;
   fsmooth.ntimes         = 4;
   fsmooth.processor_info = Afine_data->processor_info;
   csmooth.size           = Ncoarse;
   csmooth.processor_info = Afine_data->processor_info;
   csmooth.ntimes         = 1000;

   fine_grid = grid1;
   ML_Create         (&my_ml, N_grids);
   ML_Set_OutputLevel( my_ml, output_level);

   ML_Init_Amatrix      (my_ml, grid1,   Nfine,   Nfine, (void *) Afine_data);
   ML_Set_Amatrix_Matvec(my_ml, grid1,  mymatvec);

   ML_Init_Amatrix      (my_ml, grid0, Ncoarse, Ncoarse, (void *) Acoarse_data);
   ML_Set_Amatrix_Matvec(my_ml, grid0,  mymatvec);

   ML_Init_Restrictor(my_ml, grid1, grid0, Nfine, Ncoarse,(void *)Rmat_data);
   ML_Set_Restrictor_Matvec(my_ml,  grid1, myrestrict);

   ML_Init_Prolongator(my_ml, grid0, grid1, Ncoarse, Nfine,(void *)Pmat_data);
   ML_Set_Prolongator_Matvec(my_ml,  grid0, myinterp);

   ML_Set_Smoother   (my_ml, grid1,    ML_PRESMOOTHER, (void *)&fsmooth,mysmooth, NULL);
   ML_Set_Smoother   (my_ml, grid0,    ML_PRESMOOTHER, (void *)&csmooth,mysmooth, NULL);

   ML_Gen_Solver    (my_ml, 0, fine_grid, grid0 );
   ML_Iterate(my_ml, sol, rhs);
   ML_Destroy(&my_ml);
}
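
The routines mymatvec, myrestrict, myinterp, and mysmooth referenced above are user-supplied kernels that ML calls back through the registered pointers. As a minimal sketch (not the original implementation), here is what mymatvec could look like, assuming the ML_Operator-style callback signature used by the Trilinos mlguide examples; older ML versions passed the user data pointer directly as the first argument, and the constant-diagonal action below is a stand-in for whatever 'struct data' actually encodes.

int mymatvec(ML_Operator *Amat_in, int in_length, double p[],
             int out_length, double ap[])
{
   struct data *Amat_data;
   int i;

   /* Recover the pointer registered via ML_Init_Amatrix(). */
   Amat_data = (struct data *) ML_Get_MyMatvecData(Amat_in);
   (void) Amat_data;   /* a real matvec would traverse this structure */

   /* Stand-in operator: apply 2*I so the sketch is self-contained. */
   for (i = 0; i < out_length; i++) ap[i] = 2. * p[i];

   return 0;
}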
Example #2
void sample3(struct data *Afine_data, struct data *Acoarse_data,
	     struct data  *Rmat_data, struct data    *Pmat_data,
	     double *sol, double *rhs )
{
   ML *my_ml;
   double *diagonal;
   int    i, fine_grid, output_level = 10, N_grids = 2, grid0 = 1, grid1 = 0;
   int    Nfine, Ncoarse;

   Nfine    = Rmat_data->from_size;
   Ncoarse  = Rmat_data->to_size;
   diagonal = (double *) malloc(Nfine*sizeof(double));
   for (i = 0; i < Nfine; i++) diagonal[i] = 2.;

   fine_grid = grid1;
   ML_Create         (&my_ml, N_grids);
   ML_Set_OutputLevel(my_ml, output_level);

   ML_Init_Amatrix      (my_ml, grid1, Nfine, Nfine, (void *) Afine_data);
   ML_Set_Amatrix_Matvec(my_ml, grid1, mymatvec  );
   ML_Set_Amatrix_Diag  (my_ml, grid1, Nfine, diagonal);
   ML_Gen_Smoother_Jacobi(my_ml, grid1, ML_PRESMOOTHER, 2, ML_DEFAULT);

   ML_Init_Amatrix      (my_ml, grid0, Ncoarse, Ncoarse, (void *) Acoarse_data);
   ML_Set_Amatrix_Matvec(my_ml, grid0, mymatvec);
   ML_Set_Amatrix_Diag  (my_ml, grid0, Ncoarse, diagonal); /* only the first Ncoarse entries are read */
   ML_Gen_Smoother_Jacobi(my_ml, grid0, ML_PRESMOOTHER, 200, ML_DEFAULT);


   ML_Init_Prolongator(my_ml, grid0, grid1, Ncoarse, Nfine, (void*)Pmat_data);
   ML_Set_Prolongator_Matvec(my_ml,  grid0, myinterp);

   ML_Init_Restrictor(my_ml, grid1, grid0, Nfine, Ncoarse,(void *)Rmat_data);
   ML_Set_Restrictor_Matvec(my_ml,  grid1, myrestrict);

   ML_Gen_Solver    (my_ml, 0, fine_grid, grid0);
   ML_free(diagonal);   /* safe here: ML copied the diagonal above */

   ML_Iterate(my_ml, sol, rhs);
   ML_Destroy(&my_ml);

}
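
ML_Gen_Smoother_Jacobi above builds its sweeps from the diagonal supplied through ML_Set_Amatrix_Diag. Conceptually, each sweep applies x <- x + omega * D^{-1} (rhs - A*x); the hypothetical helper below spells out one such update for illustration (Ax must already hold the matvec of the current iterate).

/* One damped-Jacobi update, written out for illustration only. */
void jacobi_sweep(int n, const double diag[], const double Ax[],
                  const double rhs[], double x[], double omega)
{
   int i;
   for (i = 0; i < n; i++)
      x[i] += omega * (rhs[i] - Ax[i]) / diag[i];
}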
Example #3
void sample1(struct data *Afine_data, struct data *Acoarse_data,
	     struct data  *Rmat_data, struct data    *Pmat_data,
	     double *sol, double *rhs )
{
   ML  *my_ml;
   int i;
   int fine_grid, output_level = 10, N_grids = 2, grid0 = 0, grid1 = 1;
   int Nfine, Ncoarse;
   double *diagonal;

   Nfine    = Rmat_data->from_size;
   Ncoarse  = Rmat_data->to_size;
   diagonal = (double *) malloc(Nfine*sizeof(double));
   for (i = 0; i < Nfine; i++) diagonal[i] = 2.;

   fine_grid   = grid1;
   ML_Create         (&my_ml, N_grids);
   ML_Set_OutputLevel( my_ml, output_level);

   ML_Init_Amatrix      (my_ml, grid1,  Nfine, Nfine,(void *) Afine_data);
   ML_Set_Amatrix_Getrow(my_ml, grid1,  myAgetrow, my_comm, Nfine+1);
   ML_Set_Amatrix_Matvec(my_ml, grid1,  mymatvec);
   ML_Set_Amatrix_Diag  (my_ml, grid1,  Nfine, diagonal);
   ML_Gen_Smoother_Jacobi(my_ml, grid1,  ML_PRESMOOTHER, 2, ML_DEFAULT);

   ML_Init_Prolongator(my_ml, grid0, grid1, Ncoarse,Nfine,(void *)Pmat_data);
   ML_Set_Prolongator_Getrow(my_ml,  grid0, myPgetrow, my_comm, Ncoarse+1);
   ML_Set_Prolongator_Matvec(my_ml,  grid0, myinterp);

   ML_Init_Restrictor(my_ml, grid1, grid0, Nfine, Ncoarse,(void *)Rmat_data);
   ML_Set_Restrictor_Getrow(my_ml,  grid1, myRgetrow, my_comm, Nfine+1);
   ML_Set_Restrictor_Matvec(my_ml,  grid1, myrestrict);


   ML_Gen_AmatrixRAP(my_ml,grid1, grid0);
#ifdef SUPERLU
   ML_Gen_CoarseSolverSuperLU(my_ml, grid0);
#else
   ML_Gen_Smoother_Jacobi(my_ml, grid0,  ML_PRESMOOTHER, 100, ML_DEFAULT);
#endif

/* ML_Gen_Smoother_Jacobi(my_ml, grid0, ML_PRESMOOTHER, 200, ML_DEFAULT); */
/* ML_Gen_Smoother_GaussSeidel(my_ml, grid0, ML_PRESMOOTHER, 200, 1.);    */
   ML_Gen_Solver    (my_ml, 0, fine_grid, grid0);
   ML_Iterate(my_ml, sol, rhs);
   ML_Destroy(&my_ml);
   ML_free(diagonal);

}
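
Example #3 additionally registers getrow routines so that ML_Gen_AmatrixRAP (and the SuperLU coarse solve) can read matrix entries row by row. Below is a plausible sketch of myAgetrow, assuming the getrow callback signature from the Trilinos mlguide examples and a 1D [-1 2 -1] stencil as a stand-in operator; the real routine would read 'struct data' instead.

int myAgetrow(ML_Operator *mat_in, int N_requested_rows,
              int requested_rows[], int allocated_space,
              int columns[], double values[], int row_lengths[])
{
   int i, row, start, count = 0;
   int Nrows = mat_in->outvec_leng;              /* local number of rows */

   for (i = 0; i < N_requested_rows; i++) {
      row   = requested_rows[i];
      start = count;
      if (count + 3 > allocated_space) return 0; /* ML retries with more space */
      if (row > 0)         { columns[count] = row - 1; values[count++] = -1.; }
      columns[count] = row;  values[count++] =  2.;
      if (row < Nrows - 1) { columns[count] = row + 1; values[count++] = -1.; }
      row_lengths[i] = count - start;
   }
   return 1;                                     /* success */
}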
Example #4
int main(int argc, char *argv[]){


   ML *ml_object;
   int i, N_grids = 3, N_levels;
   double sol[5], rhs[5];
   ML_Aggregate *agg_object;
   int proc, nlocal, nlocal_allcolumns;

   MPI_Init(&argc,&argv);
   ML_Set_PrintLevel(15);

   for (i = 0; i < 5; i++) sol[i] = 0.;
   for (i = 0; i < 5; i++) rhs[i] = 2.;


   ML_Create         (&ml_object, N_grids);
   proc = ml_object->comm->ML_mypid;
   if (ml_object->comm->ML_nprocs != 2) {
      if (proc == 0) printf("Must be run on two processors\n");
      ML_Destroy(&ml_object);
      MPI_Finalize();
      exit(1);
   }

   if     (proc == 0) {nlocal = 2; nlocal_allcolumns = 4;}
   else if (proc == 1){nlocal = 3; nlocal_allcolumns = 5;}
   else               {nlocal = 0; nlocal_allcolumns = 0;}

   ML_Init_Amatrix      (ml_object, 0,  nlocal, nlocal, &proc);
   ML_Set_Amatrix_Getrow(ml_object, 0,  Poisson_getrow, Poisson_comm,
                         nlocal_allcolumns);
   ML_Set_Amatrix_Matvec(ml_object, 0,  Poisson_matvec);

   ML_Aggregate_Create(&agg_object);
   ML_Aggregate_Set_MaxCoarseSize(agg_object,1);
   N_levels = ML_Gen_MGHierarchy_UsingAggregation(ml_object, 0,
                                                  ML_INCREASING, agg_object);
   ML_Gen_Smoother_Jacobi(ml_object, ML_ALL_LEVELS, ML_PRESMOOTHER, 1, ML_DEFAULT);

   ML_Gen_Solver    (ml_object, ML_MGV, 0, N_levels-1);
   ML_Iterate(ml_object, sol, rhs);

   if (proc == 0) {
      printf("sol(0) = %e\n",sol[1]);
      fflush(stdout);
   }
   ML_Comm_GsumInt(ml_object->comm,1);    /* just used for synchronization */
   if (proc == 1) {
      printf("sol(1) = %e\n",sol[0]);
      printf("sol(2) = %e\n",sol[1]);
      printf("sol(3) = %e\n",sol[2]);
      fflush(stdout);
   }
   ML_Comm_GsumInt(ml_object->comm,1);    /* just used for synchronization */
   if (proc == 0) {
      printf("sol(4) = %e\n",sol[0]);
      fflush(stdout);
   }
   ML_Aggregate_Destroy(&agg_object);
   ML_Destroy(&ml_object);

   MPI_Finalize();

   return 0;
}
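
Poisson_comm, registered with ML_Set_Amatrix_Getrow above, is the routine ML calls to refresh ghost values before a getrow or matvec. The sketch below illustrates that contract for the two-processor 2/3 partition used in main(): each processor appends what it receives after its nlocal owned entries (hence nlocal_allcolumns = 4 and 5). Which owned entries get shipped depends on the local orderings, so this exchange is illustrative rather than the mlguide original.

int Poisson_comm(double x[], void *A_data)
{
   int        proc;
   MPI_Status status;

   (void) A_data;   /* the registered data (&proc) is unused in this sketch */
   MPI_Comm_rank(MPI_COMM_WORLD, &proc);

   if (proc == 0)        /* receive 2 ghosts after the 2 owned entries */
      MPI_Sendrecv(&x[0], 2, MPI_DOUBLE, 1, 123,
                   &x[2], 2, MPI_DOUBLE, 1, 124, MPI_COMM_WORLD, &status);
   else                  /* receive 2 ghosts after the 3 owned entries */
      MPI_Sendrecv(&x[0], 2, MPI_DOUBLE, 0, 124,
                   &x[3], 2, MPI_DOUBLE, 0, 123, MPI_COMM_WORLD, &status);
   return 0;
}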
Example #5
int main(int argc, char *argv[])
{
  int    Nnodes=16*16;              /* Total number of nodes in the problem.*/
                                    /* 'Nnodes' must be a perfect square.   */
  int    MaxMgLevels=6;             /* Maximum number of Multigrid Levels   */
  int    Nits_per_presmooth=1;      /* # of pre & post smoothings per level */
  double tolerance = 1.0e-8;        /* At convergence:                      */
                                    /*   ||r_k||_2 < tolerance ||r_0||_2    */
  int smoothPe_flag = ML_YES;       /* ML_YES: smooth tentative prolongator */
                                    /* ML_NO: don't smooth prolongator      */

  /***************************************************************************/
  /* Select Hiptmair relaxation subsmoothers for the nodal and edge problems */
  /* Choices include                                                         */
  /*   1) ML_Gen_Smoother_SymGaussSeidel: this corresponds to a processor    */
  /*      local version of symmetric Gauss-Seidel/SOR. The number of sweeps  */
  /*      can be set via either 'edge_its' or 'nodal_its'. The damping can   */
  /*      be set via 'edge_omega' or 'nodal_omega'. When set to ML_DDEFAULT, */
  /*      the damping is set to '1' on one processor. On multiple processors */
  /*      a smaller damping value is used, as this is needed for the         */
  /*      processor-local SOR to converge.                                   */
  /*   2) ML_Gen_Smoother_Cheby: this corresponds to polynomial relaxation.  */
  /*      The degree of the polynomial is set via 'edge_its' or 'nodal_its'. */
  /*      If the degree is '-1', Marian Brezina's MLS polynomial is chosen.  */
  /*      Otherwise, a Chebyshev polynomial is used over high frequencies    */
  /*      [ lambda_max/alpha , lambda_max]. Lambda_max is computed. 'alpha'  */
  /*      is hardwired in this example to correspond to twice the ratio of   */
  /*      unknowns in the fine and coarse meshes.                            */
  /*                                                                         */
  /* Using 'hiptmair_type' (see comments below) it is also possible to choose*/
  /* when edge and nodal problems are relaxed within the Hiptmair smoother.  */
  /***************************************************************************/

  void  *edge_smoother=(void *)     /* Edge relaxation:                     */
               ML_Gen_Smoother_Cheby; /*   ML_Gen_Smoother_Cheby            */
                                    /*     ML_Gen_Smoother_SymGaussSeidel   */
  void *nodal_smoother=(void *)     /* Nodal relaxation                     */
               ML_Gen_Smoother_Cheby;/*     ML_Gen_Smoother_Cheby           */
                                    /*     ML_Gen_Smoother_SymGaussSeidel   */

  int  edge_its = 3;                /* Iterations or polynomial degree for  */
  int  nodal_its = 3;               /* edge/nodal subsmoothers.             */
  double nodal_omega = ML_DDEFAULT, /* SOR damping parameter for nodal/edge */
         edge_omega  = ML_DDEFAULT; /* subsmoothers (see comments above).   */
  int   hiptmair_type=HALF_HIPTMAIR;/* FULL_HIPTMAIR: each invocation       */
                                    /*     smooths on edges, then nodes,    */
                                    /*     and then once again on edges.    */
                                    /* HALF_HIPTMAIR: each pre-invocation   */
                                    /*     smooths on edges, then nodes.    */
                                    /*     Each post-invocation smooths     */
                                    /*     on nodes, then edges.            */


  ML_Operator  *Tmat, *Tmat_trans, **Tmat_array, **Tmat_trans_array;
  ML           *ml_edges, *ml_nodes;
  ML_Aggregate *ag;
  int          Nfine_edge, Ncoarse_edge, Nfine_node, Ncoarse_node, Nlevels;
  int          level, coarsest_level, itmp;
  double       edge_coarsening_rate, node_coarsening_rate, *rhs, *xxx;
  void         **edge_args, **nodal_args;
  struct user_partition Edge_Partition = {NULL, NULL, 0, 0},
                        Node_Partition = {NULL, NULL, 0, 0};
  struct Tmat_data Tmat_data;
  int          i, Ntotal;
  ML_Comm      *comm;

  /* See Aztec User's Guide for information on these variables */

#ifdef AZTEC
  AZ_MATRIX    *Ke_mat, *Kn_mat;
  AZ_PRECOND   *Pmat = NULL;
  int          proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE];
  double       params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE];
#endif


  /* get processor information (proc id & # of procs) and set ML's printlevel. */

#ifdef ML_MPI
  MPI_Init(&argc,&argv);
#endif
#ifdef AZTEC
  AZ_set_proc_config(proc_config, COMMUNICATOR);
#endif
  ML_Set_PrintLevel(10);   /* set ML's output level: 0 gives least output */

  /* Set the # of global nodes/edges and partition both the edges and the */
  /* nodes over the processors. NOTE: we assume that if an edge is        */
  /* assigned to a processor, then at least one of its nodes must also    */
  /* be assigned to that processor.                                       */

  Node_Partition.Nglobal = Nnodes;
  Edge_Partition.Nglobal = Node_Partition.Nglobal*2;
  Node_Partition.type = NODE;
  Edge_Partition.type = EDGE;
  /* Define 'periodic' to solve the periodic variant (two extra nodes). */
/* #define periodic */
#ifdef periodic
  Node_Partition.Nglobal += 2;
#endif
  partition_edges(&Edge_Partition);
  partition_nodes(&Node_Partition);
  /* Exercise the ghost-edge exchange on a test vector. */
  xxx = (double *) ML_allocate((Edge_Partition.Nlocal+100)*sizeof(double));
  rhs = (double *) ML_allocate((Edge_Partition.Nlocal+100)*sizeof(double));
  for (i = 0; i < Edge_Partition.Nlocal + 100; i++) xxx[i] = -1.;
  for (i = 0; i < Edge_Partition.Nlocal; i++)
    xxx[i] = (double) Edge_Partition.my_global_ids[i];

  update_ghost_edges(xxx, (void *) &Edge_Partition);


  /* Create an empty multigrid hierarchy and set the 'MaxMGLevels-1'th   */
  /* level discretization within this hierarchy to the ML matrix         */
  /* representing Ke (Maxwell edge discretization).                      */

  ML_Create(&ml_edges, MaxMgLevels);
#ifdef AZTEC
  /* Build Ke as an Aztec matrix. Use built-in function AZ_ML_Set_Amat() */
  /* to convert to an ML matrix and put in hierarchy.                    */

  Ke_mat = user_Ke_build(&Edge_Partition);
  AZ_ML_Set_Amat(ml_edges, MaxMgLevels-1, Edge_Partition.Nlocal,
      		 Edge_Partition.Nlocal, Ke_mat, proc_config);
#else
  /* Build Ke directly as an ML matrix.                                  */

  ML_Init_Amatrix      (ml_edges, MaxMgLevels-1, Edge_Partition.Nlocal,
			Edge_Partition.Nlocal, &Edge_Partition);

  Ntotal = Edge_Partition.Nlocal;
  if (Edge_Partition.nprocs == 2) Ntotal += Edge_Partition.Nghost;
  ML_Set_Amatrix_Getrow(ml_edges, MaxMgLevels-1,  Ke_getrow, update_ghost_edges, Ntotal);
  ML_Set_Amatrix_Matvec(ml_edges, MaxMgLevels-1,  Ke_matvec);

#endif



  /* Build an Aztec matrix representing an auxiliary nodal PDE problem.  */
  /* This should be a variable coefficient Poisson problem (with unknowns*/
  /* at the nodes). The coefficients should be chosen to reflect the     */
  /* conductivity of the original edge problems.                         */
  /* Create an empty multigrid hierarchy. Convert the Aztec matrix to an */
  /* ML matrix and put it in the 'MaxMGLevels-1' level of the hierarchy. */
  /* Note it is possible to multiply T'*T to get this matrix, though this */
  /* will not incorporate material properties.                            */

  ML_Create(&ml_nodes, MaxMgLevels);

#ifdef AZTEC
  Kn_mat = user_Kn_build( &Node_Partition);
  AZ_ML_Set_Amat(ml_nodes, MaxMgLevels-1, Node_Partition.Nlocal, 
		 Node_Partition.Nlocal, Kn_mat, proc_config);
#else
  ML_Init_Amatrix      (ml_nodes, MaxMgLevels-1 , Node_Partition.Nlocal,
			Node_Partition.Nlocal, &Node_Partition);
  Ntotal = Node_Partition.Nlocal;
  if (Node_Partition.nprocs == 2) Ntotal += Node_Partition.Nghost;
  ML_Set_Amatrix_Getrow(ml_nodes, MaxMgLevels-1,  Kn_getrow, update_ghost_nodes, Ntotal);
#endif

  /* Build an ML matrix representing the null space of the PDE problem. */
  /* This should be a discrete gradient (nodes to edges).               */

#ifdef AZTEC
    Tmat = user_T_build (&Edge_Partition, &Node_Partition, 
  		   &(ml_nodes->Amat[MaxMgLevels-1]));
#else
    Tmat = ML_Operator_Create(ml_nodes->comm);
    Tmat_data.edge = &Edge_Partition;
    Tmat_data.node = &Node_Partition;
    Tmat_data.Kn   = &(ml_nodes->Amat[MaxMgLevels-1]);

    ML_Operator_Set_ApplyFuncData( Tmat,	Node_Partition.Nlocal,
				   Edge_Partition.Nlocal, ML_EMPTY, (void *) &Tmat_data, 
				   Edge_Partition.Nlocal, NULL, 0);
    ML_Operator_Set_Getrow( Tmat, ML_INTERNAL, Edge_Partition.Nlocal,Tmat_getrow);
    ML_Operator_Set_ApplyFunc(Tmat, ML_INTERNAL, Tmat_matvec);
  ML_Comm_Create( &comm);

  ML_CommInfoOP_Generate( &(Tmat->getrow->pre_comm), update_ghost_nodes, 
			  &Node_Partition,comm, Tmat->invec_leng, 
			  Node_Partition.Nghost);
#endif


  /********************************************************************/
  /* Set some ML parameters.                                          */
  /*------------------------------------------------------------------*/
	
  ML_Set_ResidualOutputFrequency(ml_edges, 1);
  ML_Set_Tolerance(ml_edges, 1.0e-8);
  ML_Aggregate_Create( &ag );
  ML_Aggregate_Set_CoarsenScheme_Uncoupled(ag);
  ML_Aggregate_Set_DampingFactor(ag, 0.0); /* must use 0 for maxwell */
  ML_Aggregate_Set_MaxCoarseSize(ag, 30);
  ML_Aggregate_Set_Threshold(ag, 0.0);


  /********************************************************************/
  /*                      Set up Tmat_trans                           */
  /*------------------------------------------------------------------*/

  Tmat_trans = ML_Operator_Create(ml_edges->comm);
  ML_Operator_Transpose_byrow(Tmat, Tmat_trans);


  Nlevels=ML_Gen_MGHierarchy_UsingReitzinger(ml_edges, &ml_nodes,MaxMgLevels-1,
					     ML_DECREASING,ag,Tmat,Tmat_trans, 
					     &Tmat_array,&Tmat_trans_array, 
					     smoothPe_flag, 1.5);

  /* Set the Hiptmair subsmoothers */

  if (nodal_smoother == (void *) ML_Gen_Smoother_SymGaussSeidel) {
    nodal_args = ML_Smoother_Arglist_Create(2);
    ML_Smoother_Arglist_Set(nodal_args, 0, &nodal_its);
    ML_Smoother_Arglist_Set(nodal_args, 1, &nodal_omega);
  }
  if (edge_smoother == (void *) ML_Gen_Smoother_SymGaussSeidel) {
    edge_args = ML_Smoother_Arglist_Create(2);
    ML_Smoother_Arglist_Set(edge_args, 0, &edge_its);
    ML_Smoother_Arglist_Set(edge_args, 1, &edge_omega);
  }
  if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) {
    nodal_args = ML_Smoother_Arglist_Create(2);
    ML_Smoother_Arglist_Set(nodal_args, 0, &nodal_its);
    Nfine_node = Tmat_array[MaxMgLevels-1]->invec_leng;
    Nfine_node = ML_gsum_int(Nfine_node, ml_edges->comm);
  }
  if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) {
    edge_args = ML_Smoother_Arglist_Create(2);
    ML_Smoother_Arglist_Set(edge_args, 0, &edge_its);
    Nfine_edge = Tmat_array[MaxMgLevels-1]->outvec_leng;
    Nfine_edge = ML_gsum_int(Nfine_edge, ml_edges->comm);
  }

  /****************************************************
  * Set up smoothers for all levels but the coarsest. *
  ****************************************************/
  coarsest_level = MaxMgLevels - Nlevels;

  for (level = MaxMgLevels-1; level > coarsest_level; level--)
    {
      if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) {
	Ncoarse_edge = Tmat_array[level-1]->outvec_leng;
	Ncoarse_edge = ML_gsum_int(Ncoarse_edge, ml_edges->comm);
	edge_coarsening_rate =  2.*((double) Nfine_edge)/ ((double) Ncoarse_edge);
	ML_Smoother_Arglist_Set(edge_args, 1, &edge_coarsening_rate);
	Nfine_edge = Ncoarse_edge;
      }
      if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) {
	Ncoarse_node = Tmat_array[level-1]->invec_leng;
	Ncoarse_node = ML_gsum_int(Ncoarse_node, ml_edges->comm);
	node_coarsening_rate =  2.*((double) Nfine_node)/ ((double) Ncoarse_node);
	ML_Smoother_Arglist_Set(nodal_args, 1, &node_coarsening_rate);
	Nfine_node = Ncoarse_node;
      }
      ML_Gen_Smoother_Hiptmair(ml_edges, level, ML_BOTH, Nits_per_presmooth,
			       Tmat_array, Tmat_trans_array, NULL, edge_smoother,
			       edge_args, nodal_smoother,nodal_args, hiptmair_type);
    }

  /*******************************************
  * Set up coarsest level smoother
  *******************************************/

  if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) {
    edge_coarsening_rate = (double) Nfine_edge;
    ML_Smoother_Arglist_Set(edge_args, 1, &edge_coarsening_rate);
  }
  if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) {
    node_coarsening_rate = (double) Nfine_node;
    ML_Smoother_Arglist_Set(nodal_args,1,&node_coarsening_rate);
  }
  ML_Gen_CoarseSolverSuperLU( ml_edges, coarsest_level);
  

  /* Must be called before invoking the preconditioner */
  ML_Gen_Solver(ml_edges, ML_MGV, MaxMgLevels-1, coarsest_level); 



  /* Set the initial guess and the right hand side. Invoke solver. */

  ML_free(xxx);   /* release the ghost-exchange test vectors before */
  ML_free(rhs);   /* reallocating them at the solve size            */
  xxx = (double *) ML_allocate(Edge_Partition.Nlocal*sizeof(double));
  ML_random_vec(xxx, Edge_Partition.Nlocal, ml_edges->comm);
  rhs = (double *) ML_allocate(Edge_Partition.Nlocal*sizeof(double));
  ML_random_vec(rhs, Edge_Partition.Nlocal, ml_edges->comm);

#ifdef AZTEC
  /* Choose the Aztec solver and criteria. Also tell Aztec that */
  /* ML will be supplying the preconditioner.                   */

  AZ_defaults(options, params);
  options[AZ_solver]   = AZ_fixed_pt;
  options[AZ_solver]   = AZ_gmres;
  options[AZ_kspace]   = 80;
  params[AZ_tol]       = tolerance;
  AZ_set_ML_preconditioner(&Pmat, Ke_mat, ml_edges, options); 
  options[AZ_conv] = AZ_noscaled;
  AZ_iterate(xxx, rhs, options, params, status, proc_config, Ke_mat, Pmat, NULL);
#else
  ML_Iterate(ml_edges, xxx, rhs);
#endif


  /* clean up. */

  ML_Smoother_Arglist_Delete(&nodal_args);
  ML_Smoother_Arglist_Delete(&edge_args);
  ML_Aggregate_Destroy(&ag);
  ML_Destroy(&ml_edges);
  ML_Destroy(&ml_nodes);
#ifdef AZTEC
  AZ_free((void *) Ke_mat->data_org);
  AZ_free((void *) Ke_mat->val);
  AZ_free((void *) Ke_mat->bindx);
  if (Ke_mat  != NULL) AZ_matrix_destroy(&Ke_mat);
  if (Pmat  != NULL) AZ_precond_destroy(&Pmat);
  if (Kn_mat != NULL) AZ_matrix_destroy(&Kn_mat);
#endif
  ML_free(xxx);
  ML_free(rhs);
  ML_Operator_Destroy(&Tmat);
  ML_Operator_Destroy(&Tmat_trans);
  ML_MGHierarchy_ReitzingerDestroy(MaxMgLevels-2, &Tmat_array, &Tmat_trans_array);

#ifdef ML_MPI
  MPI_Finalize();
#endif
		
  return 0;
		
}
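
Example #5 relies on a 'struct user_partition' whose definition is not shown. From the fields main() touches (my_global_ids, Nlocal, Nglobal, Nghost, nprocs, type) and the {NULL, NULL, 0, 0} initializer, a plausible reconstruction is sketched below; the field order and the needed_external_ids member are assumptions, not the example's actual header.

struct user_partition {
  int *my_global_ids;        /* global ids of the locally owned objects  */
  int *needed_external_ids;  /* ghost ids needed from other processors   */
  int  Nlocal;               /* number of locally owned nodes/edges      */
  int  Nglobal;              /* global number of nodes/edges             */
  int  Nghost;               /* number of ghosts on this processor       */
  int  nprocs;               /* number of processors                     */
  int  type;                 /* NODE or EDGE                             */
};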
Example #6
File: ml.c  Project: Kun-Qu/petsc
PetscErrorCode PCSetUp_ML(PC pc)
{
  PetscErrorCode  ierr;
  PetscMPIInt     size;
  FineGridCtx     *PetscMLdata;
  ML              *ml_object;
  ML_Aggregate    *agg_object;
  ML_Operator     *mlmat;
  PetscInt        nlocal_allcols,Nlevels,mllevel,level,level1,m,fine_level,bs;
  Mat             A,Aloc; 
  GridCtx         *gridctx; 
  PC_MG           *mg = (PC_MG*)pc->data;
  PC_ML           *pc_ml = (PC_ML*)mg->innerctx;
  PetscBool       isSeq, isMPI;
  KSP             smoother;
  PC              subpc;
  PetscInt        mesh_level, old_mesh_level;

  PetscFunctionBegin;
  A = pc->pmat;
  ierr = MPI_Comm_size(((PetscObject)A)->comm,&size);CHKERRQ(ierr);

  if (pc->setupcalled) {
    if (pc->flag == SAME_NONZERO_PATTERN && pc_ml->reuse_interpolation) {
      /*
       Reuse interpolation instead of recomputing aggregates and updating the whole hierarchy. This is less expensive for
       multiple solves in which the matrix is not changing too quickly.
       */
      ml_object = pc_ml->ml_object;
      gridctx = pc_ml->gridctx;
      Nlevels = pc_ml->Nlevels;
      fine_level = Nlevels - 1;
      gridctx[fine_level].A = A;

      ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
      ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
      if (isMPI){
        ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
      } else if (isSeq) {
        Aloc = A;
        ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
      } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

      ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
      PetscMLdata = pc_ml->PetscMLdata;
      ierr = MatDestroy(&PetscMLdata->Aloc);CHKERRQ(ierr);
      PetscMLdata->A    = A;
      PetscMLdata->Aloc = Aloc;
      ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
      ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

      mesh_level = ml_object->ML_finest_level;
      while (ml_object->SingleLevel[mesh_level].Rmat->to) {
        old_mesh_level = mesh_level;
        mesh_level = ml_object->SingleLevel[mesh_level].Rmat->to->levelnum;

        /* clean and regenerate A */
        mlmat = &(ml_object->Amat[mesh_level]);
        ML_Operator_Clean(mlmat);
        ML_Operator_Init(mlmat,ml_object->comm);
        ML_Gen_AmatrixRAP(ml_object, old_mesh_level, mesh_level);
      }

      level = fine_level - 1;
      if (size == 1) { /* convert ML P, R and A into seqaij format */
        for (mllevel=1; mllevel<Nlevels; mllevel++){
          mlmat = &(ml_object->Amat[mllevel]);
          ierr = MatWrapML_SeqAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      } else { /* convert ML P and R into shell format, ML A into mpiaij format */
        for (mllevel=1; mllevel<Nlevels; mllevel++){
          mlmat  = &(ml_object->Amat[mllevel]);
          ierr = MatWrapML_MPIAIJ(mlmat,MAT_REUSE_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
          level--;
        }
      }

      for (level=0; level<fine_level; level++) {
        if (level > 0){
          ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
        }
        ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);
      }
      ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr);
      ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,SAME_NONZERO_PATTERN);CHKERRQ(ierr);

      ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
      PetscFunctionReturn(0);
    } else {
      /* since ML can change the size of vectors/matrices at any level we must destroy everything */
      ierr = PCReset_ML(pc);CHKERRQ(ierr);
      ierr = PCReset_MG(pc);CHKERRQ(ierr);
    }
  }

  /* setup special features of PCML */
  /*--------------------------------*/
  /* convert A to Aloc to be used by ML at the fine grid */
  pc_ml->size = size;
  ierr = PetscObjectTypeCompare((PetscObject) A, MATSEQAIJ, &isSeq);CHKERRQ(ierr);
  ierr = PetscObjectTypeCompare((PetscObject) A, MATMPIAIJ, &isMPI);CHKERRQ(ierr);
  if (isMPI){ 
    ierr = MatConvert_MPIAIJ_ML(A,PETSC_NULL,MAT_INITIAL_MATRIX,&Aloc);CHKERRQ(ierr);
  } else if (isSeq) {
    Aloc = A;
    ierr = PetscObjectReference((PetscObject)Aloc);CHKERRQ(ierr);
  } else SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_WRONG, "Matrix type '%s' cannot be used with ML. ML can only handle AIJ matrices.",((PetscObject)A)->type_name);

  /* create and initialize struct 'PetscMLdata' */
  ierr = PetscNewLog(pc,FineGridCtx,&PetscMLdata);CHKERRQ(ierr); 
  pc_ml->PetscMLdata = PetscMLdata;
  ierr = PetscMalloc((Aloc->cmap->n+1)*sizeof(PetscScalar),&PetscMLdata->pwork);CHKERRQ(ierr); 

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->x);CHKERRQ(ierr);   
  ierr = VecSetSizes(PetscMLdata->x,Aloc->cmap->n,Aloc->cmap->n);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->x,VECSEQ);CHKERRQ(ierr); 

  ierr = VecCreate(PETSC_COMM_SELF,&PetscMLdata->y);CHKERRQ(ierr); 
  ierr = VecSetSizes(PetscMLdata->y,A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetType(PetscMLdata->y,VECSEQ);CHKERRQ(ierr);
  PetscMLdata->A    = A;
  PetscMLdata->Aloc = Aloc;
   
  /* create ML discretization matrix at fine grid */
  /* ML requires input of fine-grid matrix. It determines nlevels. */
  ierr = MatGetSize(Aloc,&m,&nlocal_allcols);CHKERRQ(ierr);
  ierr = MatGetBlockSize(A,&bs);CHKERRQ(ierr);
  ML_Create(&ml_object,pc_ml->MaxNlevels);
  ML_Comm_Set_UsrComm(ml_object->comm,((PetscObject)A)->comm);
  pc_ml->ml_object = ml_object;
  ML_Init_Amatrix(ml_object,0,m,m,PetscMLdata);
  ML_Set_Amatrix_Getrow(ml_object,0,PetscML_getrow,PetscML_comm,nlocal_allcols); 
  ML_Set_Amatrix_Matvec(ml_object,0,PetscML_matvec);

  ML_Set_Symmetrize(ml_object,pc_ml->Symmetrize ? ML_YES : ML_NO);

  /* aggregation */
  ML_Aggregate_Create(&agg_object); 
  pc_ml->agg_object = agg_object;

  {
    MatNullSpace mnull;
    ierr = MatGetNearNullSpace(A,&mnull);CHKERRQ(ierr);
    if (pc_ml->nulltype == PCML_NULLSPACE_AUTO) {
      if (mnull) pc_ml->nulltype = PCML_NULLSPACE_USER;
      else if (bs > 1) pc_ml->nulltype = PCML_NULLSPACE_BLOCK;
      else pc_ml->nulltype = PCML_NULLSPACE_SCALAR;
    }
    switch (pc_ml->nulltype) {
    case PCML_NULLSPACE_USER: {
      PetscScalar *nullvec;
      const PetscScalar *v;
      PetscBool has_const;
      PetscInt i,j,mlocal,nvec,M;
      const Vec *vecs;
      if (!mnull) SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_USER,"Must provide explicit null space using MatSetNearNullSpace() to use user-specified null space");
      ierr = MatGetSize(A,&M,PETSC_NULL);CHKERRQ(ierr);
      ierr = MatGetLocalSize(Aloc,&mlocal,PETSC_NULL);CHKERRQ(ierr);
      ierr = MatNullSpaceGetVecs(mnull,&has_const,&nvec,&vecs);CHKERRQ(ierr);
      ierr = PetscMalloc((nvec+!!has_const)*mlocal*sizeof *nullvec,&nullvec);CHKERRQ(ierr);
      if (has_const) for (i=0; i<mlocal; i++) nullvec[i] = 1.0/M;
      for (i=0; i<nvec; i++) {
        ierr = VecGetArrayRead(vecs[i],&v);CHKERRQ(ierr);
        for (j=0; j<mlocal; j++) nullvec[(i+!!has_const)*mlocal + j] = v[j];
        ierr = VecRestoreArrayRead(vecs[i],&v);CHKERRQ(ierr);
      }
      ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,nvec+!!has_const,nullvec,mlocal);CHKERRQ(ierr);
      ierr = PetscFree(nullvec);CHKERRQ(ierr);
    } break;
    case PCML_NULLSPACE_BLOCK:
      ierr = ML_Aggregate_Set_NullSpace(agg_object,bs,bs,0,0);CHKERRQ(ierr);
      break;
    case PCML_NULLSPACE_SCALAR:
      break;
    default: SETERRQ(((PetscObject)pc)->comm,PETSC_ERR_SUP,"Unknown null space type");
    }
  }
  ML_Aggregate_Set_MaxCoarseSize(agg_object,pc_ml->MaxCoarseSize);
  /* set options */
  switch (pc_ml->CoarsenScheme) { 
  case 1:  
    ML_Aggregate_Set_CoarsenScheme_Coupled(agg_object);break;
  case 2:
    ML_Aggregate_Set_CoarsenScheme_MIS(agg_object);break;
  case 3:
    ML_Aggregate_Set_CoarsenScheme_METIS(agg_object);break;
  }
  ML_Aggregate_Set_Threshold(agg_object,pc_ml->Threshold); 
  ML_Aggregate_Set_DampingFactor(agg_object,pc_ml->DampingFactor); 
  if (pc_ml->SpectralNormScheme_Anorm){
    ML_Set_SpectralNormScheme_Anorm(ml_object);
  }
  agg_object->keep_agg_information      = (int)pc_ml->KeepAggInfo;
  agg_object->keep_P_tentative          = (int)pc_ml->Reusable;
  agg_object->block_scaled_SA           = (int)pc_ml->BlockScaling;
  agg_object->minimizing_energy         = (int)pc_ml->EnergyMinimization;
  agg_object->minimizing_energy_droptol = (double)pc_ml->EnergyMinimizationDropTol;
  agg_object->cheap_minimizing_energy   = (int)pc_ml->EnergyMinimizationCheap;

  if (pc_ml->OldHierarchy) {
    Nlevels = ML_Gen_MGHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  } else {
    Nlevels = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml_object,0,ML_INCREASING,agg_object);
  }
  if (Nlevels<=0) SETERRQ1(((PetscObject)pc)->comm,PETSC_ERR_ARG_OUTOFRANGE,"Nlevels %d must be > 0",Nlevels);
  pc_ml->Nlevels = Nlevels;
  fine_level = Nlevels - 1;

  ierr = PCMGSetLevels(pc,Nlevels,PETSC_NULL);CHKERRQ(ierr); 
  /* set default smoothers */
  for (level=1; level<=fine_level; level++){
    ierr = PCMGGetSmoother(pc,level,&smoother);CHKERRQ(ierr);
    ierr = KSPSetType(smoother,KSPRICHARDSON);CHKERRQ(ierr);
    ierr = KSPGetPC(smoother,&subpc);CHKERRQ(ierr);
    ierr = PCSetType(subpc,PCSOR);CHKERRQ(ierr);
  }
  ierr = PetscObjectOptionsBegin((PetscObject)pc);CHKERRQ(ierr);
  ierr = PCSetFromOptions_MG(pc);CHKERRQ(ierr); /* should be called in PCSetFromOptions_ML(), but cannot be called prior to PCMGSetLevels() */
  ierr = PetscOptionsEnd();CHKERRQ(ierr);

  ierr = PetscMalloc(Nlevels*sizeof(GridCtx),&gridctx);CHKERRQ(ierr);
  pc_ml->gridctx = gridctx;

  /* wrap ML matrices by PETSc shell matrices at coarsened grids.
     Level 0 is the finest grid for ML, but coarsest for PETSc! */
  gridctx[fine_level].A = A;

  level = fine_level - 1;
  if (size == 1){ /* convert ML P, R and A into seqaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){ 
      mlmat = &(ml_object->Pmat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat = &(ml_object->Rmat[mllevel-1]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);
      
      mlmat = &(ml_object->Amat[mllevel]);
      ierr  = MatWrapML_SeqAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);
      level--;
    }
  } else { /* convert ML P and R into shell format, ML A into mpiaij format */
    for (mllevel=1; mllevel<Nlevels; mllevel++){ 
      mlmat  = &(ml_object->Pmat[mllevel]);
      ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].P);CHKERRQ(ierr);
      mlmat  = &(ml_object->Rmat[mllevel-1]);
      ierr = MatWrapML_SHELL(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].R);CHKERRQ(ierr);

      mlmat  = &(ml_object->Amat[mllevel]);
      ierr = MatWrapML_MPIAIJ(mlmat,MAT_INITIAL_MATRIX,&gridctx[level].A);CHKERRQ(ierr);  
      level--;
    }
  }

  /* create vectors and ksp at all levels */
  for (level=0; level<fine_level; level++){  
    level1 = level + 1;
    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].x);CHKERRQ(ierr); 
    ierr = VecSetSizes(gridctx[level].x,gridctx[level].A->cmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].x,VECMPI);CHKERRQ(ierr); 
    ierr = PCMGSetX(pc,level,gridctx[level].x);CHKERRQ(ierr); 
   
    ierr = VecCreate(((PetscObject)gridctx[level].A)->comm,&gridctx[level].b);CHKERRQ(ierr); 
    ierr = VecSetSizes(gridctx[level].b,gridctx[level].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level].b,VECMPI);CHKERRQ(ierr); 
    ierr = PCMGSetRhs(pc,level,gridctx[level].b);CHKERRQ(ierr); 
    
    ierr = VecCreate(((PetscObject)gridctx[level1].A)->comm,&gridctx[level1].r);CHKERRQ(ierr); 
    ierr = VecSetSizes(gridctx[level1].r,gridctx[level1].A->rmap->n,PETSC_DECIDE);CHKERRQ(ierr);
    ierr = VecSetType(gridctx[level1].r,VECMPI);CHKERRQ(ierr); 
    ierr = PCMGSetR(pc,level1,gridctx[level1].r);CHKERRQ(ierr);

    if (level == 0){
      ierr = PCMGGetCoarseSolve(pc,&gridctx[level].ksp);CHKERRQ(ierr);
    } else {
      ierr = PCMGGetSmoother(pc,level,&gridctx[level].ksp);CHKERRQ(ierr);
    }  
  }
  ierr = PCMGGetSmoother(pc,fine_level,&gridctx[fine_level].ksp);CHKERRQ(ierr);

  /* create coarse level and the interpolation between the levels */
  for (level=0; level<fine_level; level++){  
    level1 = level + 1;
    ierr = PCMGSetInterpolation(pc,level1,gridctx[level].P);CHKERRQ(ierr);
    ierr = PCMGSetRestriction(pc,level1,gridctx[level].R);CHKERRQ(ierr);     
    if (level > 0){
      ierr = PCMGSetResidual(pc,level,PCMGDefaultResidual,gridctx[level].A);CHKERRQ(ierr);
    }    
    ierr = KSPSetOperators(gridctx[level].ksp,gridctx[level].A,gridctx[level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);      
  }  
  ierr = PCMGSetResidual(pc,fine_level,PCMGDefaultResidual,gridctx[fine_level].A);CHKERRQ(ierr); 
  ierr = KSPSetOperators(gridctx[fine_level].ksp,gridctx[fine_level].A,gridctx[fine_level].A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);

  /* setupcalled is set to 0 so that MG is setup from scratch */
  pc->setupcalled = 0;  
  ierr = PCSetUp_MG(pc);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
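
PCSetUp_ML() is not called directly; it runs inside KSPSetUp() once the 'ml' preconditioner type is selected. A minimal driver of roughly the same PETSc vintage as this file (note the four-argument KSPSetOperators) might look like the sketch below; the 1D Laplacian and its size are illustrative only.

#include <petscksp.h>

int main(int argc,char **argv)
{
  Mat            A;
  Vec            x,b;
  KSP            ksp;
  PC             pc;
  PetscInt       i,ncols,Istart,Iend,n = 100,col[3];
  PetscScalar    v[3];
  PetscErrorCode ierr;

  ierr = PetscInitialize(&argc,&argv,PETSC_NULL,PETSC_NULL);CHKERRQ(ierr);

  /* Assemble a 1D Laplacian in AIJ format; ML only handles AIJ matrices. */
  ierr = MatCreate(PETSC_COMM_WORLD,&A);CHKERRQ(ierr);
  ierr = MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,n,n);CHKERRQ(ierr);
  ierr = MatSetFromOptions(A);CHKERRQ(ierr);
  ierr = MatSeqAIJSetPreallocation(A,3,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatMPIAIJSetPreallocation(A,3,PETSC_NULL,1,PETSC_NULL);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(A,&Istart,&Iend);CHKERRQ(ierr);
  for (i=Istart; i<Iend; i++) {
    ncols = 0;
    if (i > 0)   { col[ncols] = i-1; v[ncols++] = -1.0; }
    col[ncols] = i; v[ncols++] = 2.0;
    if (i < n-1) { col[ncols] = i+1; v[ncols++] = -1.0; }
    ierr = MatSetValues(A,1,&i,ncols,col,v,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);

  ierr = MatGetVecs(A,&x,&b);CHKERRQ(ierr);
  ierr = VecSet(b,1.0);CHKERRQ(ierr);

  ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);CHKERRQ(ierr);
  ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCML);CHKERRQ(ierr); /* PCSetUp_ML() runs during the solve */
  ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);
  ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);

  ierr = VecDestroy(&b);CHKERRQ(ierr);
  ierr = VecDestroy(&x);CHKERRQ(ierr);
  ierr = MatDestroy(&A);CHKERRQ(ierr);
  ierr = KSPDestroy(&ksp);CHKERRQ(ierr);
  ierr = PetscFinalize();
  return 0;
}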
Example #7
HYPRE_Int HYPRE_ParCSRMLSetup( HYPRE_Solver solver, HYPRE_ParCSRMatrix A,
                         HYPRE_ParVector b,   HYPRE_ParVector x      )
{
    HYPRE_Int        i, my_id, nprocs, coarsest_level, level, sweeps, nlevels;
    HYPRE_Int        *row_partition, localEqns, length;
    HYPRE_Int        Nblocks, *blockList;
    double     wght;
    MH_Context *context;
    MH_Matrix  *mh_mat;

    /* -------------------------------------------------------- */ 
    /* fetch the ML pointer                                     */
    /* -------------------------------------------------------- */ 

    MH_Link *link = (MH_Link *) solver;
    ML      *ml   = link->ml_ptr;
    nlevels       = link->nlevels;
   
    /* -------------------------------------------------------- */ 
    /* set up the parallel environment                          */
    /* -------------------------------------------------------- */ 

    hypre_MPI_Comm_rank(link->comm, &my_id);
    hypre_MPI_Comm_size(link->comm, &nprocs);

    /* -------------------------------------------------------- */ 
    /* fetch the matrix row partition information and put it    */
    /* into the matrix data object (for matvec and getrow)      */
    /* -------------------------------------------------------- */ 

    HYPRE_ParCSRMatrixGetRowPartitioning( A, &row_partition );
    localEqns  = row_partition[my_id+1] - row_partition[my_id];
    context = (MH_Context *) malloc(sizeof(MH_Context));
    link->contxt = context;
    context->comm = link->comm;
    context->globalEqns = row_partition[nprocs];
    context->partition = (HYPRE_Int *) malloc(sizeof(HYPRE_Int)*(nprocs+1));
    for (i=0; i<=nprocs; i++) context->partition[i] = row_partition[i];
    hypre_TFree( row_partition );
    mh_mat = ( MH_Matrix * ) malloc( sizeof( MH_Matrix) );
    context->Amat = mh_mat;
    HYPRE_ParCSRMLConstructMHMatrix(A,mh_mat,link->comm,
                                    context->partition,context); 

    /* -------------------------------------------------------- */ 
    /* set up the ML communicator information                   */
    /* -------------------------------------------------------- */ 

    ML_Set_Comm_Communicator(ml, link->comm);
    ML_Set_Comm_MyRank(ml, my_id);
    ML_Set_Comm_Nprocs(ml, nprocs);
    ML_Set_Comm_Send(ml, MH_Send);
    ML_Set_Comm_Recv(ml, MH_Irecv);
    ML_Set_Comm_Wait(ml, MH_Wait);

    /* -------------------------------------------------------- */ 
    /* set up the ML matrix information                         */
    /* -------------------------------------------------------- */ 

    ML_Init_Amatrix(ml, nlevels-1, localEqns, localEqns, (void *) context);
    ML_Set_Amatrix_Matvec(ml, nlevels-1, MH_MatVec);
    length = localEqns;
    for (i=0; i<mh_mat->recvProcCnt; i++ ) length += mh_mat->recvLeng[i];
    ML_Set_Amatrix_Getrow(ml, nlevels-1, MH_GetRow, MH_ExchBdry, length);

    /* -------------------------------------------------------- */ 
    /* create an aggregate context                              */
    /* -------------------------------------------------------- */ 

    ML_Aggregate_Create(&(link->ml_ag));
    link->ml_ag->max_levels = link->nlevels;
    ML_Aggregate_Set_Threshold( link->ml_ag, link->ag_threshold );

    /* -------------------------------------------------------- */ 
    /* perform aggregation                                      */
    /* -------------------------------------------------------- */ 

    coarsest_level = ML_Gen_MGHierarchy_UsingAggregation(ml, nlevels-1, 
                                        ML_DECREASING, link->ml_ag);
    if ( my_id == 0 )
       hypre_printf("ML : number of levels = %d\n", coarsest_level);

    coarsest_level = nlevels - coarsest_level;

    /* -------------------------------------------------------- */ 
    /* set up smoother and coarse solver                        */
    /* -------------------------------------------------------- */ 

    for (level = nlevels-1; level > coarsest_level; level--) 
    {
       sweeps = link->pre_sweeps;
       wght   = link->jacobi_wt;
       switch ( link->pre )
       {
          case 0 :
             ML_Gen_SmootherJacobi(ml, level, ML_PRESMOOTHER, sweeps, wght);
             break;
          case 1 :
             ML_Gen_SmootherGaussSeidel(ml, level, ML_PRESMOOTHER, sweeps);
             break;
          case 2 :
             ML_Gen_SmootherSymGaussSeidel(ml,level,ML_PRESMOOTHER,sweeps,1.0);
             break;
          case 3 :
             Nblocks = ML_Aggregate_Get_AggrCount( link->ml_ag, level );
             ML_Aggregate_Get_AggrMap( link->ml_ag, level, &blockList );
             ML_Gen_SmootherVBlockGaussSeidel(ml,level,ML_PRESMOOTHER,
                                              sweeps, Nblocks, blockList);
             break;
          case 4 :
             Nblocks = ML_Aggregate_Get_AggrCount( link->ml_ag, level );
             ML_Aggregate_Get_AggrMap( link->ml_ag, level, &blockList );
             ML_Gen_SmootherVBlockJacobi(ml,level,ML_PRESMOOTHER,
                                         sweeps, wght, Nblocks, blockList);
             break;
       }

       sweeps = link->post_sweeps;
       switch ( link->post )
       {
          case 0 :
             ML_Gen_SmootherJacobi(ml, level, ML_POSTSMOOTHER, sweeps, wght);
             break;
          case 1 :
             ML_Gen_SmootherGaussSeidel(ml, level, ML_POSTSMOOTHER, sweeps);
             break;
          case 2 :
             ML_Gen_SmootherSymGaussSeidel(ml,level,ML_POSTSMOOTHER,sweeps,1.0);
             break;
          case 3 :
             Nblocks = ML_Aggregate_Get_AggrCount( link->ml_ag, level );
             ML_Aggregate_Get_AggrMap( link->ml_ag, level, &blockList );
             ML_Gen_SmootherVBlockGaussSeidel(ml,level,ML_POSTSMOOTHER,
                                              sweeps, Nblocks, blockList);
             break;
          case 4 :
             Nblocks = ML_Aggregate_Get_AggrCount( link->ml_ag, level );
             ML_Aggregate_Get_AggrMap( link->ml_ag, level, &blockList );
             ML_Gen_SmootherVBlockJacobi(ml,level,ML_POSTSMOOTHER,
                                         sweeps, wght, Nblocks, blockList);
             break;
       }
    }

    ML_Gen_CoarseSolverSuperLU(ml, coarsest_level);
    /* ML_Gen_SmootherGaussSeidel(ml, coarsest_level, ML_PRESMOOTHER, 100); */

    ML_Gen_Solver(ml, ML_MGV, nlevels-1, coarsest_level);
   
    return 0;
}
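
HYPRE_ParCSRMLSetup() is one piece of hypre's wrapper around ML. The companion create/solve/destroy calls in the sketch below are assumed from the same interface's naming convention and should be checked against the hypre headers; A, b, and x are an already-assembled ParCSR system.

void solve_with_ml(MPI_Comm comm, HYPRE_ParCSRMatrix A,
                   HYPRE_ParVector b, HYPRE_ParVector x)
{
    HYPRE_Solver ml_solver;

    HYPRE_ParCSRMLCreate(comm, &ml_solver);   /* assumed companion routine */
    HYPRE_ParCSRMLSetup(ml_solver, A, b, x);  /* the routine shown above   */
    HYPRE_ParCSRMLSolve(ml_solver, A, b, x);  /* assumed companion routine */
    HYPRE_ParCSRMLDestroy(ml_solver);         /* assumed companion routine */
}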
Example #8
File: mlguide.c  Project: 00liujj/trilinos
int main(int argc, char *argv[]){


   ML *ml_object;
   int i, N_grids = 3, N_levels;
   double sol[129], rhs[129];
   ML_Aggregate *agg_object;
   ML_Operator *data;
   ML_Krylov *kdata;

#ifdef ML_MPI
   MPI_Init(&argc,&argv);
#endif
   for (i = 0; i < 129; i++) sol[i] = 0.;
   for (i = 0; i < 129; i++) rhs[i] = 2.;


   ML_Create         (&ml_object, N_grids);

   ML_Init_Amatrix      (ml_object, 0,  129, 129, NULL);
   ML_Set_Amatrix_Getrow(ml_object, 0,  Poisson_getrow, NULL, 129);
   ML_Set_Amatrix_Matvec(ml_object, 0,  Poisson_matvec);
   ML_Set_PrintLevel(10);

   ML_Aggregate_Create(&agg_object);
   ML_Aggregate_Set_MaxCoarseSize(agg_object,1);
   N_levels = ML_Gen_MGHierarchy_UsingAggregation(ml_object, 0,
                                                  ML_INCREASING, agg_object);
   /******** Begin code to set a Jacobi smoother ******

   ML_Gen_Smoother_Jacobi(ml_object, ML_ALL_LEVELS, ML_PRESMOOTHER, 1, ML_DEFAULT);

    ******** End code to set a Jacobi smoother ******/

   /******** Begin code to set a user-defined smoother ******/
   ML_Get_Amatrix(ml_object, 0, &data);
   ML_Set_Smoother(ml_object, 0, ML_BOTH, data, user_smoothing,"mine");
   ML_Get_Amatrix(ml_object, 1, &data);
   ML_Set_Smoother(ml_object, 1, ML_BOTH, data, user_smoothing,"mine");
   ML_Get_Amatrix(ml_object, 2, &data);
   ML_Set_Smoother(ml_object, 2, ML_BOTH, data, user_smoothing,"mine");
   /******** End code to set a user-defined smoother ******/

   ML_Gen_Solver    (ml_object, ML_MGV, 0, N_levels-1);

   /* This example uses an internal CG solver within ML.   */
   /* ML has limited Krylov method support. It is intended */
   /* that ML be used with another package that supplies   */
   /* more sophisticated Krylov solver options (such as    */
   /* those found in the Trilinos or Aztec packages).      */

   kdata = ML_Krylov_Create(ml_object->comm);
   ML_Krylov_Set_PrintFreq( kdata, 1 );
   ML_Krylov_Set_Method(kdata, ML_CG);
   ML_Krylov_Set_Amatrix(kdata, &(ml_object->Amat[0]));
   ML_Krylov_Set_PreconFunc(kdata, ML_MGVSolve_Wrapper);
   ML_Krylov_Set_Precon(kdata, ml_object);
   ML_Krylov_Set_Tolerance(kdata, 1.e-7);
   ML_Krylov_Solve(kdata, 129, rhs, sol);
   ML_Krylov_Destroy( &kdata );

   ML_Aggregate_Destroy(&agg_object);
   ML_Destroy(&ml_object);

   printf("answer is %e %e %e %e %e\n",sol[0],sol[1],sol[2],sol[3],sol[4]);

#ifdef ML_MPI
  MPI_Finalize();
#endif
  exit(EXIT_SUCCESS);
}
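
For completeness, here is a sketch of the user_smoothing() routine registered above, modeled on the damped-Jacobi smoother in mlguide.c; the hard-coded scratch size of 129, omega = 0.5, and the diagonal value of 2 are tied to this Poisson example and are assumptions for any other matrix.

int user_smoothing(ML_Smoother *smoother, int x_length, double x[],
                   int rhs_length, double rhs[])
{
   int i;
   double ap[129], omega = .5;     /* 129 covers the finest level here */
   ML_Operator *Amat;

   Amat = (ML_Operator *) ML_Get_MySmootherData(smoother);
   ML_Operator_Apply(Amat, x_length, x, rhs_length, ap);  /* ap = A*x */
   for (i = 0; i < x_length; i++)
      x[i] += omega * (rhs[i] - ap[i]) / 2.;  /* diag(A) = 2 for this problem */
   return 0;
}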