Example #1
int ML_strcmp(char *input, char *string)
{
/* Similar to 'C' strcmp except this one converts everything to lower case. */
  int i;
  char *input_copy, *string_copy;

  input_copy = (char *) ML_allocate(sizeof(char)*(strlen(input)+1));
  string_copy = (char *) ML_allocate(sizeof(char)*(strlen(string)+1));
  strcpy(input_copy,input);
  strcpy(string_copy,string);
  i = 0;
  while ((input_copy[i] != '\0') && (input_copy[i] != '\n')) {
    if ((input_copy[i] >= 'A') && (input_copy[i] <= 'Z'))
       input_copy[i] = 'a' + input_copy[i] - 'A';
    i++;
  }
  i = 0;
  while ((string_copy[i] != '\0') && (string_copy[i] != '\n')) {
    if ((string_copy[i] >= 'A') && (string_copy[i] <= 'Z'))
       string_copy[i] = 'a' + string_copy[i] - 'A';
    i++;
  }

  i = strcmp(input_copy, string_copy);
  ML_free(input_copy);
  ML_free(string_copy);

  return(i);
}
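For reference, here is a self-contained sketch of the same comparison written against standard C only. `ci_strcmp` is a hypothetical stand-in for ML_strcmp (which relies on ML_allocate/ML_free): it lower-cases a copy of each input up to the first '\0' or '\n' and then hands the full copies to strcmp, and it adds the allocation check that the original omits.

#include <ctype.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical standalone equivalent of ML_strcmp above: lower-case copies
 * of both inputs up to the first '\0' or '\n', then compare with strcmp.   */
static int ci_strcmp(const char *a, const char *b)
{
  char *ca = (char *) malloc(strlen(a) + 1);
  char *cb = (char *) malloc(strlen(b) + 1);
  char *p;
  int rc;

  if (ca == NULL || cb == NULL) { free(ca); free(cb); return -2; } /* no memory */
  strcpy(ca, a);
  strcpy(cb, b);
  for (p = ca; *p != '\0' && *p != '\n'; p++) *p = (char) tolower((unsigned char) *p);
  for (p = cb; *p != '\0' && *p != '\n'; p++) *p = (char) tolower((unsigned char) *p);
  rc = strcmp(ca, cb);
  free(ca);
  free(cb);
  return rc;
}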
Example #2
int ML_Reitzinger_Check_Hierarchy(ML *ml, ML_Operator **Tmat_array, int incr_or_decr)
{
  int i,j;
  int finest_level, coarsest_level;
  ML_Operator *Amat, *Tmat;
  double *randvec, *result, *result1;
  double dnorm;

  finest_level = ml->ML_finest_level;
  coarsest_level = ml->ML_coarsest_level;

  if (incr_or_decr == ML_INCREASING) {
    if (ml->comm->ML_mypid == 0) {
      printf("ML_Reitzinger_Check_Hierarchy: ML_INCREASING is not supported ");
      printf(" at this time.  Not checking hierarchy.\n");
    }
    return 1;
  }

  if ( ML_Get_PrintLevel() > 5 ) {
    printf("ML_Reitzinger_Check_Hierarchy: Checking null space\n");
  }

  for (i=finest_level; i>coarsest_level; i--) {

     Amat = ml->Amat+i;
     Tmat = Tmat_array[i];

     /* normalized random vector */
     randvec = (double *) ML_allocate(Tmat->invec_leng * sizeof(double) );
     ML_random_vec(randvec,Tmat->invec_leng, ml->comm);
     dnorm = sqrt( ML_gdot(Tmat->invec_leng, randvec, randvec, ml->comm) );
     for (j=0; j<Tmat->invec_leng; j++) randvec[j] /=  dnorm;

     result = (double *) ML_allocate(Amat->invec_leng * sizeof(double) );
     result1 = (double *) ML_allocate(Amat->outvec_leng * sizeof(double) );

     ML_Operator_Apply(Tmat, Tmat->invec_leng, randvec,
                       Tmat->outvec_leng, result);
     ML_Operator_Apply(Amat, Amat->invec_leng, result,
                       Amat->outvec_leng, result1);

     dnorm = sqrt( ML_gdot(Amat->outvec_leng, result1, result1, ml->comm) );
     if ( (ML_Get_PrintLevel() > 5) && (ml->comm->ML_mypid == 0) ) {
       printf("Level %d: for random v,  ||S*T*v|| = %15.10e\n",i,dnorm);
     }

     ML_free(randvec);
     ML_free(result);
     ML_free(result1);
  }
  if ( (ML_Get_PrintLevel() > 5) && (ml->comm->ML_mypid == 0) ) printf("\n");

  return 0;

}
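The routine above walks the hierarchy from fine to coarse and, for a normalized random v, prints ||S*T*v|| on each level as a diagnostic of how well the discrete gradient T_i matches the (near) null space of the edge operator S_i. A hedged usage sketch, not a standalone program: it assumes ml_edges and Tmat_array were produced by ML_Gen_MGHierarchy_UsingReitzinger with ML_DECREASING level numbering, as in Example #8 below.

  /* Sketch only: ml_edges and Tmat_array come from a prior call to        */
  /* ML_Gen_MGHierarchy_UsingReitzinger() (see Example #8).                */
  ML_Set_PrintLevel(6);   /* > 5 so the per-level norms are actually printed */
  ML_Reitzinger_Check_Hierarchy(ml_edges, Tmat_array, ML_DECREASING);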
Example #3
int ML_qr_fix_Create(const int nCoarseNod)
{
   int    i, nBytes;
   xCDeadNodDof = (ML_qr_fix*) ML_allocate(sizeof(ML_qr_fix));
   xCDeadNodDof->level         = 0;
   xCDeadNodDof->numDeadNodDof = 0;
   xCDeadNodDof->nDeadNodDof   = nCoarseNod;
   nBytes = (nCoarseNod+1)*sizeof(ML_QR_FIX_TYPE *);
   xCDeadNodDof->xDeadNodDof   = (ML_QR_FIX_TYPE *)ML_allocate(nBytes);
   
   for (i=0; i < nCoarseNod; i++) (xCDeadNodDof->xDeadNodDof)[i] = 0;

   return(0);
}
Example #4
int ML_Smoother_Ifpack(ML_Smoother *sm,int inlen,double x[],int outlen,
		       double rhs[])
{
  ML_Smoother    *smooth_ptr = (ML_Smoother *) sm;
  void *Ifpack_Handle = smooth_ptr->smoother->data;
  double* x2 = NULL,* rhs2 = NULL;
  /*int i;*/
  int n, kk;
  int one_int = 1;
  double minus_one_double = -1.0;

  if (sm->init_guess == ML_NONZERO)
  {
    n = sm->my_level->Amat->invec_leng;
    assert (n == sm->my_level->Amat->outvec_leng);

    rhs2 = (double*) ML_allocate(sizeof(double) * (n + 1));
    x2   = (double*) ML_allocate(sizeof(double) * (n + 1));

    ML_Operator_Apply(sm->my_level->Amat, n, x, n, rhs2);
    DCOPY_F77(&n, x, &one_int, x2, &one_int);
    DAXPY_F77(&n, &minus_one_double, rhs, &one_int, rhs2, &one_int);
    ML_Ifpack_Solve(Ifpack_Handle, x2, rhs2);
    DAXPY_F77(&n, &minus_one_double, x2, &one_int, x, &one_int);

    ML_free(rhs2);
    ML_free(x2);
  }
  else
    ML_Ifpack_Solve(Ifpack_Handle, x, rhs);

  for (kk = 1; kk < sm->ntimes; kk++) {
    n = sm->my_level->Amat->invec_leng;
    assert (n == sm->my_level->Amat->outvec_leng);

    rhs2 = (double*) ML_allocate(sizeof(double) * (n + 1));
    x2 = (double*) ML_allocate(sizeof(double) * (n + 1));

    ML_Operator_Apply(sm->my_level->Amat, n, x, n, rhs2);
    DCOPY_F77(&n, x, &one_int, x2, &one_int);
    DAXPY_F77(&n, &minus_one_double, rhs, &one_int, rhs2, &one_int);
    ML_Ifpack_Solve(Ifpack_Handle, x2, rhs2);
    DAXPY_F77(&n, &minus_one_double, x2, &one_int, x, &one_int);

    ML_free(rhs2);
    ML_free(x2);
  }
  return 0;
} /* ML_Smoother_Ifpack */
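The ML_NONZERO branch above is a defect correction: rhs2 = A*x - rhs is formed, the Ifpack application is run on that (negated) residual, and the result is subtracted from x, which is algebraically the update x <- x + M^{-1}(b - A*x) with M^{-1} the Ifpack smoother. The ML code expresses this with BLAS DCOPY/DAXPY calls; the sketch below spells out the same sweep with plain loops and a Jacobi-style diagonal solve standing in for Ifpack, so it is self-contained standard C with hypothetical names.

/* One defect-correction sweep x <- x + M^{-1}(b - A*x) for a dense n-by-n A.
 * Here M^{-1} is a Jacobi-style diagonal solve standing in for the Ifpack
 * application used in ML_Smoother_Ifpack above (hypothetical sketch).      */
static void defect_correction_sweep(int n, const double *A, const double *b,
                                    double *x, double *r)
{
  int i, j;

  for (i = 0; i < n; i++) {                  /* r = b - A*x                  */
    r[i] = b[i];
    for (j = 0; j < n; j++) r[i] -= A[i*n + j] * x[j];
  }
  for (i = 0; i < n; i++)                    /* x = x + M^{-1}*r, M = diag(A) */
    x[i] += r[i] / A[i*n + i];
}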
Example #5
int main(int argc, char *argv[])
{
   int i, j, nrows, *mat_ia, *mat_ja, startRow, mypid, scheme, globalN;
   int nprocs, nullDim, method;
   double *mat_a, *sol, *rhs;
   MLI_Solver *solver;

   MPI_Init(&argc, &argv);
   MPI_Comm_rank(MPI_COMM_WORLD, &mypid);
   MPI_Comm_size(MPI_COMM_WORLD, &nprocs);

   if ( mypid == 0 )
   {
      printf("Test program : no. of processors = %d\n", nprocs);
      printf("multigrid method (1-AMG,2-Smoothed Aggregation) : ");
      scanf("%d", &method);
      if ( method == 2 )
      {
         printf("Coarsen scheme (1-Uncoupled,2-Coupled) : ");
         scanf("%d", &scheme);
      } else scheme = 1;
   }
   MPI_Bcast(&method, 1, MPI_INT, 0, MPI_COMM_WORLD);
   MPI_Bcast(&scheme, 1, MPI_INT, 0, MPI_COMM_WORLD);
   if ( method != 1 && method != 2 ) method = 1;
   if ( scheme < 1 || scheme > 2 ) scheme = 1;

   solver = MLI_Solver_Create(MPI_COMM_WORLD);
   /*
   nrows = MLI_Solver_Get_IJAFromFile(solver,"matrix.data","rhs.data");
   MLI_Solver_Get_NullSpaceFromFile(solver,"rbm.data");
   */
   globalN = 128 * 128;
   nrows = ML_PDE_GenMat(solver,globalN);
   sol = (double *) ML_allocate( nrows * sizeof(double) );
   for ( i = 0; i < nrows; i++ ) sol[i] = 0.0;
   MLI_Solver_Set_MLNumLevels(solver, 7);
   MLI_Solver_Set_KrylovMethod(solver, 0);
   MLI_Solver_Set_MGMethod(solver,method);
   MLI_Solver_Set_CoarsenScheme(solver,scheme);
   if ( method == 1 ) MLI_Solver_Set_StrongThreshold(solver,0.25);
   else               MLI_Solver_Set_StrongThreshold(solver,0.08);
   MLI_Solver_Set_NumPreSmoothings(solver,1);
   MLI_Solver_Set_NumPostSmoothings(solver,1);
   MLI_Solver_Set_PreSmoother(solver,2);
   MLI_Solver_Set_PostSmoother(solver,2);
   MLI_Solver_Set_DampingFactor(solver,1.0);
   MLI_Solver_Setup(solver, sol);
   MLI_Solver_Solve(solver);

   MPI_Finalize();
   return 0;
}
Example #6
// ====================================================================== 
MultiVector GetDiagonal(const Operator& A, const int offset)
{
  // FIXME
  if (A.GetDomainSpace() != A.GetRangeSpace())
    ML_THROW("Currently only square matrices are supported", -1);

  MultiVector D(A.GetDomainSpace());
  D = 0.0;
  
  ML_Operator* matrix = A.GetML_Operator();

  if (matrix->getrow == NULL)
    ML_THROW("getrow() not set!", -1);

  int row_length;
  int allocated = 128;
  int*    bindx = (int    *)  ML_allocate(allocated*sizeof(int   ));
  double* val   = (double *)  ML_allocate(allocated*sizeof(double));

  for (int i = 0 ; i < matrix->getrow->Nrows; i++) {
    int GlobalRow = A.GetGRID(i);
    ML_get_matrix_row(matrix, 1, &i, &allocated, &bindx, &val,
                      &row_length, 0);
    for  (int j = 0; j < row_length; j++) {
      D(i) = 0.0;
      if (A.GetGCID(bindx[j]) == GlobalRow + offset) {
        D(i) = val[j];
        break;
      }
    }
  }

  ML_free(val);
  ML_free(bindx);
  return (D);

}
Example #7
void ML_Reader_ReadInput(char *cmd_file_name, struct reader_context **context)

/* Read the input file for the multigrid driver according to the tumi/shadi
 * format (not written down anywhere) and set algorithm- and problem-dependent
 * parameters accordingly.
 *
 *    Authors:  John N. Shadid, 1421
 *              Harry K. Moffat 1421
 *              Scott A. Hutchinson 1421
 *              Gary L. Hennigan 1421
 */

{
  FILE *ifp;


  /*********************** BEGIN EXECUTION ***********************************/

  *context =(struct reader_context *) ML_allocate(sizeof(struct reader_context));
  ML_Reader_InitContext(*context);

  /* Open the command input file */

  if ( (ifp = fopen(cmd_file_name, "r")) != NULL) {
      ;
/*    (void) fprintf(stderr, "ML input mode\n"); */
  } else {
    (void) fprintf(stderr, "read_input_file: Can't open input file, %s,",
                   cmd_file_name);
    (void) fprintf(stderr, " for reading\n");
    exit(-1);
  }

  ML_Reader_GetGeneralSpecs(ifp, *context);
  ML_Reader_GetSolutionSpecs(ifp, *context);
  ML_Reader_GetAggregationSpecs(ifp, *context);


  fclose(ifp);
}
Example #8
int main(int argc, char *argv[])
{
  int    Nnodes=16*16;              /* Total number of nodes in the problem.*/
                                    /* 'Nnodes' must be a perfect square.   */
  int    MaxMgLevels=6;             /* Maximum number of Multigrid Levels   */
  int    Nits_per_presmooth=1;      /* # of pre & post smoothings per level */
  double tolerance = 1.0e-8;        /* At convergence:                      */
                                    /*   ||r_k||_2 < tolerance ||r_0||_2    */
  int smoothPe_flag = ML_YES;       /* ML_YES: smooth tentative prolongator */
                                    /* ML_NO: don't smooth prolongator      */

  /***************************************************************************/
  /* Select Hiptmair relaxation subsmoothers for the nodal and edge problems */
  /* Choices include                                                         */
  /*   1) ML_Gen_Smoother_SymGaussSeidel: this corresponds to a processor    */
  /*      local version of symmetric Gauss-Seidel/SOR. The number of sweeps  */
  /*      can be set via either 'edge_its' or 'nodal_its'. The damping can   */
  /*      be set via 'edge_omega' or 'nodal_omega'. When set to ML_DDEFAULT, */
  /*      the damping is set to '1' on one processor. On multiple processors */
  /*      a lower damping value is set. This is needed to converge processor */
  /*      local SOR.                                                         */
  /*   2) ML_Gen_Smoother_Cheby: this corresponds to polynomial relaxation.    */
  /*      The degree of the polynomial is set via 'edge_its' or 'nodal_its'. */
  /*      If the degree is '-1', Marian Brezina's MLS polynomial is chosen.  */
  /*      Otherwise, a Chebyshev polynomial is used over high frequencies    */
  /*      [ lambda_max/alpha , lambda_max]. Lambda_max is computed. 'alpha'  */
  /*      is hardwired in this example to correspond to twice the ratio of   */
  /*      unknowns in the fine and coarse meshes.                            */
  /*                                                                         */
  /* Using 'hiptmair_type' (see comments below) it is also possible to choose*/
  /* when edge and nodal problems are relaxed within the Hiptmair smoother.  */
  /***************************************************************************/

  void  *edge_smoother=(void *)     /* Edge relaxation:                     */
               ML_Gen_Smoother_Cheby; /*   ML_Gen_Smoother_Cheby            */
                                    /*     ML_Gen_Smoother_SymGaussSeidel   */
  void *nodal_smoother=(void *)     /* Nodal relaxation                     */
               ML_Gen_Smoother_Cheby;/*     ML_Gen_Smoother_Cheby           */
                                    /*     ML_Gen_Smoother_SymGaussSeidel   */

  int  edge_its = 3;                /* Iterations or polynomial degree for  */
  int  nodal_its = 3;               /* edge/nodal subsmoothers.             */
  double nodal_omega = ML_DDEFAULT, /* SOR damping parameter for nodal/edge */
         edge_omega  = ML_DDEFAULT; /* subsmoothers (see comments above).   */
  int   hiptmair_type=HALF_HIPTMAIR;/* FULL_HIPTMAIR: each invocation      */
                                    /*     smoothes on edges, then nodes,   */
                                    /*     and then once again on edges.    */
                                    /* HALF_HIPTMAIR: each pre-invocation  */
                                    /*     smoothes on edges, then nodes.   */
                                    /*     Each post-invocation smoothes   */
                                    /*     on nodes, then edges.            */


  ML_Operator  *Tmat, *Tmat_trans, **Tmat_array, **Tmat_trans_array;
  ML           *ml_edges, *ml_nodes;
  ML_Aggregate *ag;
  int          Nfine_edge, Ncoarse_edge, Nfine_node, Ncoarse_node, Nlevels;
  int          level, coarsest_level, itmp;
  double       edge_coarsening_rate, node_coarsening_rate, *rhs, *xxx;
  void         **edge_args, **nodal_args;
  struct       user_partition Edge_Partition = {NULL, NULL,0,0}, 
                                Node_Partition = {NULL, NULL,0,0};
  struct Tmat_data Tmat_data;
  int i, Ntotal;
  ML_Comm *comm;

  /* See Aztec User's Guide for information on these variables */

#ifdef AZTEC
  AZ_MATRIX    *Ke_mat, *Kn_mat;
  AZ_PRECOND   *Pmat = NULL;
  int          proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE];
  double       params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE];
#endif


  /* get processor information (proc id & # of procs) and set ML's printlevel. */

#ifdef ML_MPI
  MPI_Init(&argc,&argv);
#endif
#ifdef AZTEC
  AZ_set_proc_config(proc_config, COMMUNICATOR);
#endif
  ML_Set_PrintLevel(10);   /* set ML's output level: 0 gives least output */

  /* Set the # of global nodes/edges and partition both the edges and the */
  /* nodes over the processors. NOTE: I believe we assume that if an edge */
  /* is assigned to a processor at least one of its nodes must be also    */
  /* assigned to that processor.                                          */

  Node_Partition.Nglobal = Nnodes;
  Edge_Partition.Nglobal = Node_Partition.Nglobal*2;
  Node_Partition.type = NODE;
  Edge_Partition.type = EDGE;
#define perxodic
#ifdef periodic
Node_Partition.Nglobal += 2; 
#endif
  partition_edges(&Edge_Partition);
  partition_nodes(&Node_Partition);
  xxx = (double *) ML_allocate((Edge_Partition.Nlocal+100)*sizeof(double));
  rhs = (double *) ML_allocate((Edge_Partition.Nlocal+100)*sizeof(double));
  for (i = 0; i < Edge_Partition.Nlocal + 100; i++) xxx[i] = -1.;
  for (i = 0; i < Edge_Partition.Nlocal; i++)
    xxx[i] = (double) Edge_Partition.my_global_ids[i];

  update_ghost_edges(xxx, (void *) &Edge_Partition);


  /* Create an empty multigrid hierarchy and set the 'MaxMGLevels-1'th   */
  /* level discretization within this hierarchy to the ML matrix         */
  /* representing Ke (Maxwell edge discretization).                      */

  ML_Create(&ml_edges, MaxMgLevels);
#ifdef AZTEC
  /* Build Ke as an Aztec matrix. Use built-in function AZ_ML_Set_Amat() */
  /* to convert to an ML matrix and put in hierarchy.                    */

  Ke_mat = user_Ke_build(&Edge_Partition);
  AZ_ML_Set_Amat(ml_edges, MaxMgLevels-1, Edge_Partition.Nlocal,
      		 Edge_Partition.Nlocal, Ke_mat, proc_config);
#else
  /* Build Ke directly as an ML matrix.                                  */

  ML_Init_Amatrix      (ml_edges, MaxMgLevels-1, Edge_Partition.Nlocal,
			Edge_Partition.Nlocal, &Edge_Partition);

  Ntotal = Edge_Partition.Nlocal;
  if (Edge_Partition.nprocs == 2) Ntotal += Edge_Partition.Nghost;
  ML_Set_Amatrix_Getrow(ml_edges, MaxMgLevels-1,  Ke_getrow, update_ghost_edges, Ntotal);
  ML_Set_Amatrix_Matvec(ml_edges, MaxMgLevels-1,  Ke_matvec);

#endif



  /* Build an Aztec matrix representing an auxiliary nodal PDE problem.  */
  /* This should be a variable coefficient Poisson problem (with unknowns*/
  /* at the nodes). The coefficients should be chosen to reflect the     */
  /* conductivity of the original edge problems.                         */
  /* Create an empty multigrid hierarchy. Convert the Aztec matrix to an */
  /* ML matrix and put it in the 'MaxMGLevels-1' level of the hierarchy. */
  /* Note it is possible to multiply T'*T to get this matrix, though this*/
  /* will not incorporate material properties.                           */

  ML_Create(&ml_nodes, MaxMgLevels);

#ifdef AZTEC
  Kn_mat = user_Kn_build( &Node_Partition);
  AZ_ML_Set_Amat(ml_nodes, MaxMgLevels-1, Node_Partition.Nlocal, 
		 Node_Partition.Nlocal, Kn_mat, proc_config);
#else
  ML_Init_Amatrix      (ml_nodes, MaxMgLevels-1 , Node_Partition.Nlocal,
			Node_Partition.Nlocal, &Node_Partition);
  Ntotal = Node_Partition.Nlocal;
  if (Node_Partition.nprocs == 2) Ntotal += Node_Partition.Nghost;
  ML_Set_Amatrix_Getrow(ml_nodes, MaxMgLevels-1,  Kn_getrow, update_ghost_nodes, Ntotal);
#endif

  /* Build an ML matrix representing the null space of the PDE problem. */
  /* This should be a discrete gradient (nodes to edges).               */

#ifdef AZTEC
    Tmat = user_T_build (&Edge_Partition, &Node_Partition, 
  		   &(ml_nodes->Amat[MaxMgLevels-1]));
#else
    Tmat = ML_Operator_Create(ml_nodes->comm);
    Tmat_data.edge = &Edge_Partition;
    Tmat_data.node = &Node_Partition;
    Tmat_data.Kn   = &(ml_nodes->Amat[MaxMgLevels-1]);

    ML_Operator_Set_ApplyFuncData( Tmat,	Node_Partition.Nlocal,
				   Edge_Partition.Nlocal, ML_EMPTY, (void *) &Tmat_data, 
				   Edge_Partition.Nlocal, NULL, 0);
    ML_Operator_Set_Getrow( Tmat, ML_INTERNAL, Edge_Partition.Nlocal,Tmat_getrow);
    ML_Operator_Set_ApplyFunc(Tmat, ML_INTERNAL, Tmat_matvec);
  ML_Comm_Create( &comm);

  ML_CommInfoOP_Generate( &(Tmat->getrow->pre_comm), update_ghost_nodes, 
			  &Node_Partition,comm, Tmat->invec_leng, 
			  Node_Partition.Nghost);
#endif


  /********************************************************************/
  /* Set some ML parameters.                                          */
  /*------------------------------------------------------------------*/
	
  ML_Set_ResidualOutputFrequency(ml_edges, 1);
  ML_Set_Tolerance(ml_edges, 1.0e-8);
  ML_Aggregate_Create( &ag );
  ML_Aggregate_Set_CoarsenScheme_Uncoupled(ag);
  ML_Aggregate_Set_DampingFactor(ag, 0.0); /* must use 0 for maxwell */
  ML_Aggregate_Set_MaxCoarseSize(ag, 30);
  ML_Aggregate_Set_Threshold(ag, 0.0);


  /********************************************************************/
  /*                      Set up Tmat_trans                           */
  /*------------------------------------------------------------------*/

  Tmat_trans = ML_Operator_Create(ml_edges->comm);
  ML_Operator_Transpose_byrow(Tmat, Tmat_trans);


  Nlevels=ML_Gen_MGHierarchy_UsingReitzinger(ml_edges, &ml_nodes,MaxMgLevels-1,
					     ML_DECREASING,ag,Tmat,Tmat_trans, 
					     &Tmat_array,&Tmat_trans_array, 
					     smoothPe_flag, 1.5);

  /* Set the Hiptmair subsmoothers */

  if (nodal_smoother == (void *) ML_Gen_Smoother_SymGaussSeidel) {
    nodal_args = ML_Smoother_Arglist_Create(2);
    ML_Smoother_Arglist_Set(nodal_args, 0, &nodal_its);
    ML_Smoother_Arglist_Set(nodal_args, 1, &nodal_omega);
  }
  if (edge_smoother == (void *) ML_Gen_Smoother_SymGaussSeidel) {
    edge_args = ML_Smoother_Arglist_Create(2);
    ML_Smoother_Arglist_Set(edge_args, 0, &edge_its);
    ML_Smoother_Arglist_Set(edge_args, 1, &edge_omega);
  }
  if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) {
    nodal_args = ML_Smoother_Arglist_Create(2);
    ML_Smoother_Arglist_Set(nodal_args, 0, &nodal_its);
    Nfine_node = Tmat_array[MaxMgLevels-1]->invec_leng;
    Nfine_node = ML_gsum_int(Nfine_node, ml_edges->comm);
  }
  if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) {
    edge_args = ML_Smoother_Arglist_Create(2);
    ML_Smoother_Arglist_Set(edge_args, 0, &edge_its);
    Nfine_edge = Tmat_array[MaxMgLevels-1]->outvec_leng;
    Nfine_edge = ML_gsum_int(Nfine_edge, ml_edges->comm);
  }

  /****************************************************
  * Set up smoothers for all levels but the coarsest. *
  ****************************************************/
  coarsest_level = MaxMgLevels - Nlevels;

  for (level = MaxMgLevels-1; level > coarsest_level; level--)
    {
      if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) {
	Ncoarse_edge = Tmat_array[level-1]->outvec_leng;
	Ncoarse_edge = ML_gsum_int(Ncoarse_edge, ml_edges->comm);
	edge_coarsening_rate =  2.*((double) Nfine_edge)/ ((double) Ncoarse_edge);
	ML_Smoother_Arglist_Set(edge_args, 1, &edge_coarsening_rate);
	Nfine_edge = Ncoarse_edge;
      }
      if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) {
	Ncoarse_node = Tmat_array[level-1]->invec_leng;
	Ncoarse_node = ML_gsum_int(Ncoarse_node, ml_edges->comm);
	node_coarsening_rate =  2.*((double) Nfine_node)/ ((double) Ncoarse_node);
	ML_Smoother_Arglist_Set(nodal_args, 1, &node_coarsening_rate);
	Nfine_node = Ncoarse_node;
      }
      ML_Gen_Smoother_Hiptmair(ml_edges, level, ML_BOTH, Nits_per_presmooth,
			       Tmat_array, Tmat_trans_array, NULL, edge_smoother,
			       edge_args, nodal_smoother,nodal_args, hiptmair_type);
    }

  /*******************************************
  * Set up coarsest level smoother
  *******************************************/

  if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) {
    edge_coarsening_rate = (double) Nfine_edge;
    ML_Smoother_Arglist_Set(edge_args, 1, &edge_coarsening_rate);
  }
  if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) {
    node_coarsening_rate = (double) Nfine_node;
    ML_Smoother_Arglist_Set(nodal_args,1,&node_coarsening_rate);
  }
  ML_Gen_CoarseSolverSuperLU( ml_edges, coarsest_level);
  

  /* Must be called before invoking the preconditioner */
  ML_Gen_Solver(ml_edges, ML_MGV, MaxMgLevels-1, coarsest_level); 



  /* Set the initial guess and the right hand side. Invoke solver */	

  xxx = (double *) ML_allocate(Edge_Partition.Nlocal*sizeof(double)); 
  ML_random_vec(xxx, Edge_Partition.Nlocal, ml_edges->comm);
  rhs = (double *) ML_allocate(Edge_Partition.Nlocal*sizeof(double)); 
  ML_random_vec(rhs, Edge_Partition.Nlocal, ml_edges->comm);

#ifdef AZTEC
  /* Choose the Aztec solver and criteria. Also tell Aztec that */
  /* ML will be supplying the preconditioner.                   */

  AZ_defaults(options, params);
  options[AZ_solver]   = AZ_fixed_pt;
  options[AZ_solver]   = AZ_gmres;
  options[AZ_kspace]   = 80;
  params[AZ_tol]       = tolerance;
  AZ_set_ML_preconditioner(&Pmat, Ke_mat, ml_edges, options); 
  options[AZ_conv] = AZ_noscaled;
  AZ_iterate(xxx, rhs, options, params, status, proc_config, Ke_mat, Pmat, NULL);
#else
  ML_Iterate(ml_edges, xxx, rhs);
#endif


  /* clean up. */

  ML_Smoother_Arglist_Delete(&nodal_args);
  ML_Smoother_Arglist_Delete(&edge_args);
  ML_Aggregate_Destroy(&ag);
  ML_Destroy(&ml_edges);
  ML_Destroy(&ml_nodes);
#ifdef AZTEC
  AZ_free((void *) Ke_mat->data_org);
  AZ_free((void *) Ke_mat->val);
  AZ_free((void *) Ke_mat->bindx);
  if (Ke_mat  != NULL) AZ_matrix_destroy(&Ke_mat);
  if (Pmat  != NULL) AZ_precond_destroy(&Pmat);
  if (Kn_mat != NULL) AZ_matrix_destroy(&Kn_mat);
#endif
  ML_free(xxx);
  ML_free(rhs);
  ML_Operator_Destroy(&Tmat);
  ML_Operator_Destroy(&Tmat_trans);
  ML_MGHierarchy_ReitzingerDestroy(MaxMgLevels-2, &Tmat_array, &Tmat_trans_array);

#ifdef ML_MPI
  MPI_Finalize();
#endif
		
  return 0;
		
}
Example #9
ML_Operator *user_T_build(struct user_partition *Edge_Partition,
		      struct user_partition *Node_Partition,
		      ML_Operator *Kn_mat)
{

  int nx, i, ii, jj, horv, Ncols, Nexterns;
  int *Tmat_bindx;
  double *Tmat_val;
  ML_Operator *Tmat;
  struct ML_CSR_MSRdata *csr_data;
  ML_Comm *comm;
  struct aztec_context *aztec_context;
  int global_id;
  int Nlocal_nodes, Nlocal_edges; 
  int nz_ptr;

  Nlocal_nodes = Node_Partition->Nlocal;
  Nlocal_edges = Edge_Partition->Nlocal;

  nx = (int) sqrt( ((double) Node_Partition->Nglobal) + .00001);
#ifdef periodic
  nx = (int) sqrt( ((double) Node_Partition->Nglobal - 2) + .00001); 
#endif

  Tmat_bindx = (int    *) malloc((6*Nlocal_edges+5)*sizeof(int));
  Tmat_val   = (double *) malloc((6*Nlocal_edges+5)*sizeof(double));

  Tmat_bindx[0] = Nlocal_edges + 1;
  for (i = 0; i < Nlocal_edges; i++) {
    global_id = (Edge_Partition->my_global_ids)[i];
    Tmat_bindx[i+1] = Tmat_bindx[i] + 2;
#ifdef periodic
Tmat_bindx[i+1] += 1; 
#endif
    Tmat_val[i] = 0.0;

    inv2dindex(global_id, &ii, &jj, nx, &horv);
    nz_ptr = Tmat_bindx[i];
    if (horv == HORIZONTAL) {
      Tmat_bindx[nz_ptr] = southwest2d(ii,jj,nx);  Tmat_val[nz_ptr++] = -1.;
      Tmat_bindx[nz_ptr] = southeast2d(ii,jj,nx);  Tmat_val[nz_ptr++] = 1.;
#ifdef periodic
      Tmat_bindx[nz_ptr] = Nlocal_nodes-2;         Tmat_val[nz_ptr++] = 1.;
#endif
    }
    else {
      Tmat_bindx[nz_ptr] = northwest2d(ii,jj,nx);  Tmat_val[nz_ptr++] = -1.;
      Tmat_bindx[nz_ptr] = southwest2d(ii,jj,nx);  Tmat_val[nz_ptr++] =  1.;
#ifdef periodic
      Tmat_bindx[nz_ptr] = Nlocal_nodes - 1;       Tmat_val[nz_ptr++] = 1.;
#endif
    }
  }

  /* Convert the MSR matrix to a CSR matrix. Then use a modified Aztec */
  /* routine to convert the global CSR matrix to a local ML matrix     */
  /* Since this routine does not compute the communication structure,  */
  /* we assume that it is identical to Kn's and just clone it.         */

  csr_data = (struct ML_CSR_MSRdata *) ML_allocate(sizeof(struct ML_CSR_MSRdata));
  csr_data->columns = Tmat_bindx;
  csr_data->values  = Tmat_val;
  ML_MSR2CSR(csr_data, Nlocal_edges, &Ncols);
  aztec_context = (struct aztec_context *) Kn_mat->data;
  Nexterns = (aztec_context->Amat->data_org)[AZ_N_external];

  Nexterns = 0;

  ML_Comm_Create( &comm);

  AZ_Tmat_transform2ml(Nexterns,   Node_Partition->needed_external_ids,
		       reordered_node_externs,
		       Tmat_bindx, Tmat_val, csr_data->rowptr, Nlocal_nodes,
		       Node_Partition->my_global_ids,
		       comm, Nlocal_edges, &Tmat);
  ML_free(csr_data);
  Tmat->data_destroy = ML_CSR_MSRdata_Destroy;
  ML_CommInfoOP_Clone(&(Tmat->getrow->pre_comm), Kn_mat->getrow->pre_comm);

  return(Tmat);
}
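Each row of the T matrix assembled above is one edge of the mesh: a -1 at the edge's tail node and a +1 at its head node (plus an extra entry in the periodic variant), which is exactly a discrete gradient, so T applied to a constant nodal field gives zero. A self-contained sketch for a 1-D mesh with three nodes and two edges, plain CSR with no ML calls and hypothetical names:

#include <stdio.h>

/* Discrete gradient T for a 1-D mesh: nodes 0-1-2, edges (0,1) and (1,2).
 * Row e of T has -1 at the tail node and +1 at the head node of edge e.   */
int main(void)
{
  int    rowptr[3] = {0, 2, 4};                 /* CSR row pointers (2 rows) */
  int    cols[4]   = {0, 1,  1, 2};             /* tail, head per edge       */
  double vals[4]   = {-1.0, 1.0,  -1.0, 1.0};
  double v[3]      = {7.0, 7.0, 7.0};           /* constant nodal vector     */
  double Tv[2];
  int    e, k;

  for (e = 0; e < 2; e++) {
    Tv[e] = 0.0;
    for (k = rowptr[e]; k < rowptr[e+1]; k++) Tv[e] += vals[k] * v[cols[k]];
    printf("(T*v)[%d] = %g\n", e, Tv[e]);       /* both entries print 0      */
  }
  return 0;
}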
Example #10
int ML_memory_alloc( void **memptr, unsigned int leng, char *name )
{
   int  i, *int_ptr, nchunks, ndouble=sizeof(double);
   char *var_ptr;
   double *dptr;

   /* ----------------------------------------------------------------- */
   /* if the first time, initialize the bookkeeping array               */
   /* ----------------------------------------------------------------- */

   if (malloc_initialized == -1)
   { 
      for (i=0; i<MAX_MALLOC_LOG; i++) malloc_leng_log[i] = -1;
      malloc_leng_log[0] = 0;
      malloc_initialized = 0; 
   }

   /* ----------------------------------------------------------------- */
   /* check for error before allocating any storage space               */
   /* ----------------------------------------------------------------- */

/*
TAKING THIS OUT TO HANDLE CASE WHEN THERE ARE NO POINTS ON PROC
   else if (leng == 0)
   { 
      printf("ML_malloc warning : %s - leng = 0 (%d).\n",name,leng);
      (*memptr) = NULL;
      return 0;
   }
*/

   {

      /* -------------------------------------------------------------- */
      /* allocate more space than requested for error checking          */
      /* -------------------------------------------------------------- */

      nchunks = leng / ndouble;
      if ((nchunks * ndouble) < (int) leng) nchunks = nchunks + 3;
      else                            nchunks = nchunks + 2;
      var_ptr = (char *) ML_allocate(nchunks*ndouble);
      dptr = (double *) var_ptr;
      for (i = 0; i < nchunks; i++) dptr[i] = 0.;

      /* -------------------------------------------------------------- */
      /* if an error is returned from malloc, terminate execution       */
      /* -------------------------------------------------------------- */

      if (var_ptr == NULL)
      {
         int mypid=0;
#ifdef ML_MPI
         MPI_Comm_rank(MPI_COMM_WORLD,&mypid);
#endif
	 pr_error("(%d) %s: unable to allocate %d bytes to %s.\n",mypid, ML_FUNCTION_NAME, leng, name );
      }

      /* -------------------------------------------------------------- */
      /* check if there is any available slot in the  bookkeeping array */
      /* -------------------------------------------------------------- */

      for ( i = 1; i < MAX_MALLOC_LOG; i++ ) 
         if (malloc_leng_log[i] == -1) break;

      /* -------------------------------------------------------------- */
      /* if the reply is positive, register the allocation information  */ 
      /* -------------------------------------------------------------- */

      if ( i < MAX_MALLOC_LOG )
      {
         int_ptr    = (int *) var_ptr;
         (*int_ptr) = i + 1;
         int_ptr    = (int*) ((ml_size_t) var_ptr + nchunks*ndouble - ndouble);
         (*int_ptr) = i + 1;
         malloc_addr_log[i] = (ml_size_t) memptr;
         malloc_leng_log[i] = nchunks * ndouble;
         malloc_name_log[i][0] = name[0];
         malloc_name_log[i][1] = name[1];
         malloc_name_log[i][2] = name[2];
         var_ptr = (char*) ((ml_size_t) var_ptr + ndouble);
         (*memptr) = (void *) var_ptr; 
         return i;

      /* -------------------------------------------------------------- */
      /* otherwise, signal the inability to track error for future      */
      /* investigation of this allocation                               */
      /* -------------------------------------------------------------- */

      }
      else
      {
         /*
         if (malloc_initialized != 1) 
            printf("ML_malloc message : unable to track any more %s.\n",
                    name);
         */
         malloc_initialized = 1;
         int_ptr    = (int*) var_ptr;
         (*int_ptr) = -1;
         int_ptr    = (int*) ((ml_size_t) var_ptr + nchunks*ndouble - ndouble);
         (*int_ptr) = -1;
         var_ptr = (char*) ((ml_size_t) var_ptr + ndouble);
         (*memptr) = (void *) var_ptr; 
         return 0;
      }
   }
}
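ML_memory_alloc rounds each request up to whole doubles, adds two extra double-sized chunks, writes a tag derived from the bookkeeping slot (or -1 once the log is full) into the first and last chunk, and returns a pointer that skips the leading tag; the log arrays (malloc_leng_log and friends) then let the matching free routine validate the block. Below is a self-contained sketch of just that header/footer tagging idea, with hypothetical names, plain malloc/free in place of the ML wrappers, and no logging arrays.

#include <stdlib.h>
#include <string.h>

/* Hypothetical sketch of the tagging scheme used by ML_memory_alloc above:
 * round the request up to whole doubles, add one leading and one trailing
 * double-sized chunk, store a tag in both, and return the shifted pointer. */
static void *tagged_alloc(size_t leng, int tag)
{
  size_t ndouble = sizeof(double);
  size_t nchunks = leng / ndouble + 2;          /* +2 guard chunks           */
  char  *raw;

  if (leng % ndouble != 0) nchunks++;           /* room for the remainder    */
  raw = (char *) malloc(nchunks * ndouble);
  if (raw == NULL) return NULL;
  memcpy(raw, &tag, sizeof(int));                           /* leading tag   */
  memcpy(raw + (nchunks - 1) * ndouble, &tag, sizeof(int)); /* trailing tag  */
  return raw + ndouble;                         /* caller never sees the tags */
}

static void tagged_free(void *ptr)
{
  if (ptr != NULL) free((char *) ptr - sizeof(double));
}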
Example #11
int ML_Aggregate_CoarsenUser(ML_Aggregate *ml_ag, ML_Operator *Amatrix, 
			      ML_Operator **Pmatrix, ML_Comm *comm)
{
  unsigned int nbytes, length;
  int     i, j,  k, Nrows, exp_Nrows;
  int     diff_level;
  int     aggr_count, index, mypid, num_PDE_eqns;
  int     *aggr_index = NULL, nullspace_dim;
  int     Ncoarse, count;
  int     *new_ia = NULL, *new_ja = NULL, new_Nrows;
  int     exp_Ncoarse;
  int     *aggr_cnt_array = NULL;
  int     level, index3, max_agg_size;
  int     **rows_in_aggs = NULL, lwork, info;
  double  *new_val = NULL, epsilon;
  double  *nullspace_vect = NULL, *qr_tmp = NULL;
  double  *tmp_vect = NULL, *work = NULL, *new_null = NULL;
  ML_SuperNode          *aggr_head = NULL, *aggr_curr, *supernode;
  struct ML_CSR_MSRdata *csr_data;
  int                   total_nz = 0;
  char str[80];

  int * graph_decomposition = NULL;
  ML_Aggregate_Viz_Stats * aggr_viz_and_stats;
  ML_Aggregate_Viz_Stats * grid_info;
  int Nprocs;
  char * unamalg_bdry = NULL;
  char* label;
  int N_dimensions;
  double* x_coord = NULL;
  double* y_coord = NULL;
  double* z_coord = NULL;

  /* ------------------- execution begins --------------------------------- */

  label =  ML_GetUserLabel();
  sprintf(str, "%s (level %d) :", label, ml_ag->cur_level);

  /* ============================================================= */
  /* get the machine information and matrix references             */
  /* ============================================================= */

  mypid                   = comm->ML_mypid;
  Nprocs                  = comm->ML_nprocs;
  epsilon                 = ml_ag->threshold;
  num_PDE_eqns            = ml_ag->num_PDE_eqns;
  nullspace_dim           = ml_ag->nullspace_dim;
  nullspace_vect          = ml_ag->nullspace_vect;
  Nrows                   = Amatrix->outvec_leng;

  if (mypid == 0 && 5 < ML_Get_PrintLevel()) {
    printf("%s num PDE eqns = %d\n",
           str,
           num_PDE_eqns);
  }

  /* ============================================================= */
  /* check the system size versus null dimension size              */
  /* ============================================================= */

  if ( Nrows % num_PDE_eqns != 0 )
  {
    printf("ML_Aggregate_CoarsenUser ERROR : Nrows must be multiples");
    printf(" of num_PDE_eqns.\n");
    exit(EXIT_FAILURE);
  }
  diff_level = ml_ag->max_levels - ml_ag->cur_level - 1;
  if ( diff_level > 0 ) num_PDE_eqns = nullspace_dim; /* ## 12/20/99 */

  /* ============================================================= */
  /* set up the threshold for weight-based coarsening              */
  /* ============================================================= */

  diff_level = ml_ag->begin_level - ml_ag->cur_level;
  if (diff_level == 0) 
    ml_ag->curr_threshold = ml_ag->threshold;
  epsilon = ml_ag->curr_threshold;
  ml_ag->curr_threshold *= 0.5;

  if (mypid == 0 && 7 < ML_Get_PrintLevel())
    printf("%s current eps = %e\n", str, epsilon);

  epsilon = epsilon * epsilon;

  ML_Operator_AmalgamateAndDropWeak(Amatrix, num_PDE_eqns, epsilon);
  Nrows /= num_PDE_eqns;

  exp_Nrows = Nrows;

  /* ********************************************************************** */
  /* allocate memory for aggr_index, which will contain the decomposition   */
  /* ********************************************************************** */

  nbytes = (Nrows*num_PDE_eqns) * sizeof(int);

  if ( nbytes > 0 ) {
    ML_memory_alloc((void**) &aggr_index, nbytes, "ACJ");
    if( aggr_index == NULL ) {
      fprintf( stderr,
              "*ML*ERR* not enough memory for %d bytes\n"
              "*ML*ERR* (file %s, line %d)\n",
              nbytes,
              __FILE__,
              __LINE__ );
      exit( EXIT_FAILURE );
    }
  }
  else              aggr_index = NULL;

  for( i=0 ; i<Nrows*num_PDE_eqns ; i++ ) aggr_index[i] = -1;

  unamalg_bdry = (char *) ML_allocate( sizeof(char) * (Nrows+1) );

  if( unamalg_bdry == NULL ) {
    fprintf( stderr,
            "*ML*ERR* on proc %d, not enough space for %d bytes\n"
            "*ML*ERR* (file %s, line %d)\n",
            mypid,
            (int)sizeof(char) * Nrows,
            __FILE__,
            __LINE__ );
    exit( EXIT_FAILURE );
  }

  N_dimensions = ml_ag->N_dimensions;
  grid_info = (ML_Aggregate_Viz_Stats*) Amatrix->to->Grid->Grid;
  x_coord = grid_info->x;

  if (N_dimensions > 1 && x_coord)
    y_coord = grid_info->y;
  else
    y_coord = 0;
  if (N_dimensions > 2 && x_coord)
    z_coord = grid_info->z;
  else
    z_coord = 0;

  aggr_count = ML_GetUserPartitions(Amatrix,unamalg_bdry,
                                    epsilon,
                                    x_coord,y_coord,z_coord,
                                    aggr_index,&total_nz);

#ifdef ML_MPI
  MPI_Allreduce( &Nrows, &i, 1, MPI_INT, MPI_SUM, Amatrix->comm->USR_comm );
  MPI_Allreduce( &aggr_count, &j, 1, MPI_INT, MPI_SUM, Amatrix->comm->USR_comm );
#else
  i = Nrows;
  j = aggr_count;
#endif

  if( mypid == 0 && 7 < ML_Get_PrintLevel() ) {
    printf("%s Using %d (block) aggregates (globally)\n",
           str,
           j );
    printf("%s # (block) aggre/ # (block) rows = %8.5f %% ( = %d / %d)\n",
           str,
           100.0*j/i,
           j, i);
  }

  j = ML_gsum_int( aggr_count, comm );
  if (mypid == 0 && 7 < ML_Get_PrintLevel())  {
    printf("%s %d (block) aggregates (globally)\n",
           str, j );
  }   

  /* ********************************************************************** */
  /* I allocate room to copy aggr_index and pass this value to the user,    */
  /* who will be able to analyze and visualize this after the construction  */
  /* of the levels. This way, the only price we have to pay for stats and   */
  /* viz is essentially a little bit of memory.                             */
  /* this memory will be cleaned with the object ML_Aggregate ml_ag.        */
  /* I set the pointers using the ML_Aggregate_Info structure. This is      */
  /* allocated using ML_Aggregate_Info_Setup(ml,MaxNumLevels)               */
  /* ********************************************************************** */

  if (Amatrix->to->Grid->Grid != NULL) {

    graph_decomposition = (int *)ML_allocate(sizeof(int)*(Nrows+1));
    if( graph_decomposition == NULL ) {
      fprintf( stderr,
              "*ML*ERR* Not enough memory for %d bytes\n"
              "*ML*ERR* (file %s, line %d)\n",
              (int)sizeof(int)*Nrows,
              __FILE__,
              __LINE__ );
      exit( EXIT_FAILURE );
    }

    for( i=0 ; i<Nrows ; i++ ) graph_decomposition[i] = aggr_index[i];

    aggr_viz_and_stats = (ML_Aggregate_Viz_Stats *) (Amatrix->to->Grid->Grid);
    aggr_viz_and_stats->graph_decomposition = graph_decomposition;
    aggr_viz_and_stats->Nlocal = Nrows;
    aggr_viz_and_stats->Naggregates = aggr_count;
    aggr_viz_and_stats->local_or_global = ML_LOCAL_INDICES;
    aggr_viz_and_stats->is_filled = ML_YES;
    aggr_viz_and_stats->Amatrix = Amatrix;
  }

  /* ********************************************************************** */
  /* take the decomposition as created by METIS and form the aggregates     */
  /* ********************************************************************** */

  total_nz = ML_Comm_GsumInt( comm, total_nz);
  i = ML_Comm_GsumInt( comm, Nrows);

  if ( mypid == 0 && 7 < ML_Get_PrintLevel())
    printf("%s Total (block) nnz = %d ( = %5.2f/(block)row)\n",
           str,
           total_nz,1.0*total_nz/i);

  if ( ml_ag->operator_complexity == 0.0 ) {
    ml_ag->fine_complexity = total_nz;
    ml_ag->operator_complexity = total_nz;
  }
  else ml_ag->operator_complexity += total_nz;

  /* fix aggr_index for num_PDE_eqns > 1 */

  for (i = Nrows - 1; i >= 0; i-- ) {
    for (j = num_PDE_eqns-1; j >= 0; j--) {
      aggr_index[i*num_PDE_eqns+j] = aggr_index[i];
    }
  }

  if ( mypid == 0 && 8 < ML_Get_PrintLevel())
  {
    printf("Calling ML_Operator_UnAmalgamateAndDropWeak\n");
    fflush(stdout);
  }

  ML_Operator_UnAmalgamateAndDropWeak(Amatrix, num_PDE_eqns, epsilon);

  Nrows      *= num_PDE_eqns;
  exp_Nrows  *= num_PDE_eqns;

  /* count the size of each aggregate */

  aggr_cnt_array = (int *) ML_allocate(sizeof(int)*(aggr_count+1));
  for (i = 0; i < aggr_count ; i++) aggr_cnt_array[i] = 0;
  for (i = 0; i < exp_Nrows; i++) {
    if (aggr_index[i] >= 0) {
      if( aggr_index[i] >= aggr_count ) {
        fprintf( stderr,
                "*ML*WRN* on process %d, something weird happened...\n"
                "*ML*WRN* node %d belong to aggregate %d (#aggr = %d)\n"
                "*ML*WRN* (file %s, line %d)\n",
                comm->ML_mypid,
                i,
                aggr_index[i],
                aggr_count,
                __FILE__,
                __LINE__ );
      } else {
        aggr_cnt_array[aggr_index[i]]++;
      }
    }
  }

  /* ============================================================= */
  /* Form tentative prolongator                                    */
  /* ============================================================= */

  Ncoarse = aggr_count;

  /* ============================================================= */
  /* check and copy aggr_index                                     */
  /* ------------------------------------------------------------- */

  level = ml_ag->cur_level;
  nbytes = (Nrows+1) * sizeof( int );
  ML_memory_alloc((void**) &(ml_ag->aggr_info[level]), nbytes, "AGl");
  count = aggr_count;
  for ( i = 0; i < Nrows; i+=num_PDE_eqns ) 
  {
    if ( aggr_index[i] >= 0 )
    {
      for ( j = 0; j < num_PDE_eqns; j++ ) 
        ml_ag->aggr_info[level][i+j] = aggr_index[i];
      if (aggr_index[i] >= count) count = aggr_index[i] + 1;
    }
    /*else
     *{
     *   printf("%d : CoarsenMIS error : aggr_index[%d] < 0\n",
     *          mypid,i);
     *   exit(1);
     *}*/
  }
  ml_ag->aggr_count[level] = count; /* for relaxing boundary points */ 

  /* ============================================================= */
  /* set up the new operator                                       */
  /* ------------------------------------------------------------- */

  new_Nrows = Nrows;
  exp_Ncoarse = Nrows;

  for ( i = 0; i < new_Nrows; i++ ) 
  {
    if ( aggr_index[i] >= exp_Ncoarse ) 
    {
      printf("*ML*WRN* index out of bound %d = %d(%d)\n",
             i, aggr_index[i], 
             exp_Ncoarse);
    }
  }
  nbytes = ( new_Nrows+1 ) * sizeof(int); 
  ML_memory_alloc((void**)&(new_ia), nbytes, "AIA");
  nbytes = ( new_Nrows+1)  * nullspace_dim * sizeof(int); 
  ML_memory_alloc((void**)&(new_ja), nbytes, "AJA");
  nbytes = ( new_Nrows+1)  * nullspace_dim * sizeof(double); 
  ML_memory_alloc((void**)&(new_val), nbytes, "AVA");
  for ( i = 0; i < new_Nrows*nullspace_dim; i++ ) new_val[i] = 0.0;

  /* ------------------------------------------------------------- */
  /* set up the space for storing the new null space               */
  /* ------------------------------------------------------------- */

  nbytes = (Ncoarse+1) * nullspace_dim * nullspace_dim * sizeof(double);
  ML_memory_alloc((void**)&(new_null),nbytes,"AGr");
  if( new_null == NULL ) {
    fprintf( stderr,
            "*ML*ERR* on process %d, not enough memory for %d bytes\n"
            "*ML*ERR* (file %s, line %d)\n",
            mypid,
            nbytes,
            __FILE__,
            __LINE__ );
    exit( EXIT_FAILURE );
  }

  for (i = 0; i < Ncoarse*nullspace_dim*nullspace_dim; i++) 
    new_null[i] = 0.0;

  /* ------------------------------------------------------------- */
  /* initialize the row pointer for the CSR prolongation operator  */
  /* (each row will have at most nullspace_dim nonzero entries)    */
  /* ------------------------------------------------------------- */

  for (i = 0; i <= Nrows; i++) new_ia[i] = i * nullspace_dim;

  /* trying this when a Dirichlet row is taken out */
  j = 0;
  new_ia[0] = 0;
  for (i = 0; i < Nrows; i++) {
    if (aggr_index[i] != -1) j += nullspace_dim;
    new_ia[i+1] = j;
  }

  /* ------------------------------------------------------------- */
  /* generate an array to store which aggregate has which rows, then*/
  /* loop through the rows of A checking which aggregate each row  */
  /* is in, and adding it to the appropriate spot in rows_in_aggs  */
  /* ------------------------------------------------------------- */

  ML_memory_alloc((void**)&rows_in_aggs,aggr_count*sizeof(int*),"MLs");
  for (i = 0; i < aggr_count; i++) {
    nbytes = aggr_cnt_array[i]+1;
    rows_in_aggs[i] = (int *) ML_allocate(nbytes*sizeof(int));
    aggr_cnt_array[i] = 0;
    if (rows_in_aggs[i] == NULL)  {
      printf("*ML*ERR* couldn't allocate memory in CoarsenMETIS\n");
      exit(1);
    }
  }
  for (i = 0; i < exp_Nrows; i+=num_PDE_eqns) {
    if ( aggr_index[i] >= 0 && aggr_index[i] < aggr_count)
    {
      for (j = 0; j < num_PDE_eqns; j++)
      {
        index = aggr_cnt_array[aggr_index[i]]++; 
        rows_in_aggs[aggr_index[i]][index] = i + j;
      }
    }
  }

  /* ------------------------------------------------------------- */
  /* allocate work arrays for QR factorization                     */
  /* work and lwork are needed for lapack's QR routine.  These     */
  /* settings seemed easiest since I don't quite understand        */
  /* what they do, but may want to do something better here later  */
  /* ------------------------------------------------------------- */

  max_agg_size = 0;
  for (i = 0; i < aggr_count; i++) 
  {
    if (aggr_cnt_array[i] > max_agg_size) max_agg_size = aggr_cnt_array[i];
  }
  nbytes = max_agg_size * nullspace_dim * sizeof(double);
  ML_memory_alloc((void**)&qr_tmp, nbytes, "AGu");
  nbytes = nullspace_dim * sizeof(double);
  ML_memory_alloc((void**)&tmp_vect, nbytes, "AGv");

  lwork  = nullspace_dim;
  nbytes = nullspace_dim * sizeof(double);
  ML_memory_alloc((void**)&work, nbytes, "AGw");

  /* ------------------------------------------------------------- */
  /* perform block QR decomposition                                */
  /* ------------------------------------------------------------- */

  for (i = 0; i < aggr_count; i++) 
  {
    /* ---------------------------------------------------------- */
    /* set up the matrix we want to decompose into Q and R:       */
    /* ---------------------------------------------------------- */

    length = aggr_cnt_array[i];
    if (nullspace_vect == NULL) 
    {
      for (j = 0; j < (int) length; j++)
      {
        index = rows_in_aggs[i][j];

        for (k = 0; k < nullspace_dim; k++)
        {
          if ( unamalg_bdry[index/num_PDE_eqns] == 'T')
            qr_tmp[k*length+j] = 0.;
          else
          {
            if (index % num_PDE_eqns == k) qr_tmp[k*length+j] = 1.0;
            else                           qr_tmp[k*length+j] = 0.0;
          }
        }
      }
    }
    else 
    {
      for (k = 0; k < nullspace_dim; k++)
      {
        for (j = 0; j < (int) length; j++)
        {
          index = rows_in_aggs[i][j];
          if ( unamalg_bdry[index/num_PDE_eqns] == 'T')
            qr_tmp[k*length+j] = 0.;
          else {
            if (index < Nrows) {
              qr_tmp[k*length+j] = nullspace_vect[k*Nrows+index];
            }
            else {
              fprintf( stderr,
                      "*ML*ERR* in QR\n"
                      "*ML*ERR* (file %s, line %d)\n",
                      __FILE__,
                      __LINE__ );
              exit( EXIT_FAILURE );
            }
          }
        }
      }
    }

    /* ---------------------------------------------------------- */
    /* now calculate QR using an LAPACK routine                   */
    /* ---------------------------------------------------------- */

    if (aggr_cnt_array[i] >= nullspace_dim) {

      DGEQRF_F77(&(aggr_cnt_array[i]), &nullspace_dim, qr_tmp, 
                 &(aggr_cnt_array[i]), tmp_vect, work, &lwork, &info);
      if (info != 0)
        pr_error("ErrOr in CoarsenMIS : dgeqrf returned a non-zero %d %d\n",
                 aggr_cnt_array[i],i);

      if (work[0] > lwork) 
      {
        lwork=(int) work[0]; 
        ML_memory_free((void**) &work);
        ML_memory_alloc((void**) &work, sizeof(double)*lwork, "AGx");
      }
      else lwork=(int) work[0];

      /* ---------------------------------------------------------- */
      /* the upper triangle of qr_tmp is now R, so copy that into   */
      /* the new nullspace                                          */
      /* ---------------------------------------------------------- */

      for (j = 0; j < nullspace_dim; j++)
        for (k = j; k < nullspace_dim; k++)
          new_null[i*nullspace_dim+j+k*Ncoarse*nullspace_dim] = 
            qr_tmp[j+aggr_cnt_array[i]*k];

      /* ---------------------------------------------------------- */
      /* to get this block of P, need to run qr_tmp through another */
      /* LAPACK function:                                           */
      /* ---------------------------------------------------------- */

      if ( aggr_cnt_array[i] < nullspace_dim ){
        printf("Error in dorgqr on %d row (dims are %d, %d)\n",i,aggr_cnt_array[i],
               nullspace_dim);
        printf("ERROR : performing QR on a MxN matrix where M<N.\n");
      }
      DORGQR_F77(&(aggr_cnt_array[i]), &nullspace_dim, &nullspace_dim, 
                 qr_tmp, &(aggr_cnt_array[i]), tmp_vect, work, &lwork, &info);
      if (info != 0) {
        printf("Error in dorgqr on %d row (dims are %d, %d)\n",i,aggr_cnt_array[i],
               nullspace_dim);
        pr_error("Error in CoarsenMIS: dorgqr returned a non-zero\n");
      }

      if (work[0] > lwork) 
      {
        lwork=(int) work[0]; 
        ML_memory_free((void**) &work);
        ML_memory_alloc((void**) &work, sizeof(double)*lwork, "AGy");
      }
      else lwork=(int) work[0];

      /* ---------------------------------------------------------- */
      /* now copy Q over into the appropriate part of P:            */
      /* The rows of P get calculated out of order, so I assume the */
      /* Q is totally dense and use what I know of how big each Q   */
      /* will be to determine where in ia, ja, etc each nonzero in  */
      /* Q belongs.  If I did not assume this, I would have to keep */
      /* all of P in memory in order to determine where each entry  */
      /* should go                                                  */
      /* ---------------------------------------------------------- */

      for (j = 0; j < aggr_cnt_array[i]; j++)
      {
        index = rows_in_aggs[i][j];

        if ( index < Nrows )
        {
          index3 = new_ia[index];
          for (k = 0; k < nullspace_dim; k++) 
          {
            new_ja [index3+k] = i * nullspace_dim + k;
            new_val[index3+k] = qr_tmp[ k*aggr_cnt_array[i]+j];
          }
        }
        else 
        {
          fprintf( stderr,
                  "*ML*ERR* in QR: index out of bounds (%d - %d)\n",
                  index,
                  Nrows );
        }
      }
    }
    else {
      /* We have a small aggregate such that the QR factorization can not */
      /* be performed. Instead let us copy the null space from the fine   */
      /* into the coarse grid nullspace and put the identity for the      */
      /* prolongator????                                                  */
      for (j = 0; j < nullspace_dim; j++)
        for (k = 0; k < nullspace_dim; k++)
          new_null[i*nullspace_dim+j+k*Ncoarse*nullspace_dim] = 
            qr_tmp[j+aggr_cnt_array[i]*k];
      for (j = 0; j < aggr_cnt_array[i]; j++) {
        index = rows_in_aggs[i][j];
        index3 = new_ia[index];
        for (k = 0; k < nullspace_dim; k++) {
          new_ja [index3+k] = i * nullspace_dim + k;
          if (k == j) new_val[index3+k] = 1.;
          else new_val[index3+k] = 0.;
        }
      }
    }


  }

  ML_Aggregate_Set_NullSpace(ml_ag, num_PDE_eqns, nullspace_dim, 
                             new_null, Ncoarse*nullspace_dim);
  ML_memory_free( (void **) &new_null);

  /* ------------------------------------------------------------- */
  /* set up the csr_data data structure                            */
  /* ------------------------------------------------------------- */

  ML_memory_alloc((void**) &csr_data, sizeof(struct ML_CSR_MSRdata),"CSR");
  csr_data->rowptr  = new_ia;
  csr_data->columns = new_ja;
  csr_data->values  = new_val;

  ML_Operator_Set_ApplyFuncData( *Pmatrix, nullspace_dim*Ncoarse, Nrows, 
                                csr_data, Nrows, NULL, 0);
  (*Pmatrix)->data_destroy = ML_CSR_MSR_ML_memorydata_Destroy;
  (*Pmatrix)->getrow->pre_comm = ML_CommInfoOP_Create();
  (*Pmatrix)->max_nz_per_row = 1;

  ML_Operator_Set_Getrow((*Pmatrix), Nrows, CSR_getrow);
  ML_Operator_Set_ApplyFunc((*Pmatrix), CSR_matvec);
  (*Pmatrix)->max_nz_per_row = 1;
  /* this must be set so that the hierarchy generation does not abort early
     in adaptive SA */
  (*Pmatrix)->num_PDEs = nullspace_dim;

  /* ------------------------------------------------------------- */
  /* clean up                                                      */
  /* ------------------------------------------------------------- */

  ML_free(unamalg_bdry);
  ML_memory_free((void**)&aggr_index);
  ML_free(aggr_cnt_array);
  for (i = 0; i < aggr_count; i++) ML_free(rows_in_aggs[i]);
  ML_memory_free((void**)&rows_in_aggs);
  ML_memory_free((void**)&qr_tmp);
  ML_memory_free((void**)&tmp_vect);
  ML_memory_free((void**)&work);

  aggr_curr = aggr_head;
  while ( aggr_curr != NULL ) 
  {
    supernode = aggr_curr;
    aggr_curr = aggr_curr->next;
    if ( supernode->length > 0 ) ML_free( supernode->list );
    ML_free( supernode );
  }

  return Ncoarse*nullspace_dim;

} /* ML_Aggregate_CoarsenUser */
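The heart of the coarsening above is the per-aggregate QR factorization: the rows of the fine-level null space that belong to one aggregate form a tall, skinny matrix; its Q factor (made explicit by DORGQR) becomes that aggregate's block column of the tentative prolongator P, and its R factor (from DGEQRF) becomes the coarse-level null space. For a one-dimensional null space the QR reduces to normalizing a vector, which the following self-contained sketch (hypothetical names, two aggregates of three rows each) spells out:

#include <math.h>
#include <stdio.h>

/* Tentative prolongator for a single null-space vector: within each
 * aggregate, Q is the normalized local piece of the null space (these
 * become the entries of P) and R (the norm) is the coarse null-space entry. */
int main(void)
{
  double nullspace[6]  = {1.0, 1.0, 1.0, 1.0, 1.0, 1.0};  /* fine null space */
  int    aggr_index[6] = {0, 0, 0, 1, 1, 1};              /* 2 aggregates    */
  double P[6], coarse_null[2] = {0.0, 0.0};
  int    i, a;

  for (a = 0; a < 2; a++) {                  /* R = || local null space ||   */
    for (i = 0; i < 6; i++)
      if (aggr_index[i] == a) coarse_null[a] += nullspace[i] * nullspace[i];
    coarse_null[a] = sqrt(coarse_null[a]);
  }
  for (i = 0; i < 6; i++)                    /* Q = local piece / R          */
    P[i] = nullspace[i] / coarse_null[aggr_index[i]];

  for (i = 0; i < 6; i++) printf("P[%d][%d] = %g\n", i, aggr_index[i], P[i]);
  return 0;
}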
Example #12
// ================================================ ====== ==== ==== == =
int RefMaxwell_SetupCoordinates(ML_Operator* A, Teuchos::ParameterList &List_, double *&coordx, double *&coordy, double *&coordz)
// Copied from ML_Epetra::MultiLevelPreconditioner::SetupCoordinates()
{
  double* in_x_coord = 0;
  double* in_y_coord = 0;
  double* in_z_coord = 0;
  int NumPDEEqns_ =1;  // Always use 1 because A is a nodal matrix.

  // For node coordinates
  in_x_coord = List_.get("x-coordinates", (double *)0);
  in_y_coord = List_.get("y-coordinates", (double *)0);
  in_z_coord = List_.get("z-coordinates", (double *)0);

  if (!(in_x_coord == 0 && in_y_coord == 0 && in_z_coord == 0))
    {
      ML_Operator* AAA = A;

      int n = AAA->invec_leng, Nghost = 0;

      if (AAA->getrow->pre_comm)
	{
	  if (AAA->getrow->pre_comm->total_rcv_length <= 0)
	    ML_CommInfoOP_Compute_TotalRcvLength(AAA->getrow->pre_comm);
	  Nghost = AAA->getrow->pre_comm->total_rcv_length;
	}

      std::vector<double> tmp(Nghost + n);
      for (int i = 0 ; i < Nghost + n ; ++i)
        tmp[i] = 0.0;

      n /= NumPDEEqns_;
      Nghost /= NumPDEEqns_;

      if (in_x_coord)
	{
	  double* x_coord = (double *) ML_allocate(sizeof(double) * (Nghost+n));

	  for (int i = 0 ; i < n ; ++i)
	    tmp[i * NumPDEEqns_] = in_x_coord[i];

	  ML_exchange_bdry(&tmp[0],AAA->getrow->pre_comm, NumPDEEqns_ * n,
			   AAA->comm, ML_OVERWRITE,NULL);

	  for (int i = 0 ; i < n + Nghost ; ++i)
	    x_coord[i] = tmp[i * NumPDEEqns_];

	  coordx = x_coord;
	}

      if (in_y_coord)
	{
	  double* y_coord = (double *) ML_allocate(sizeof(double) * (Nghost+n));

	  for (int i = 0 ; i < n ; ++i)
	    tmp[i * NumPDEEqns_] = in_y_coord[i];

	  ML_exchange_bdry(&tmp[0],AAA->getrow->pre_comm, NumPDEEqns_ * n,
			   AAA->comm, ML_OVERWRITE,NULL);

	  for (int i = 0 ; i < n + Nghost ; ++i)
	    y_coord[i] = tmp[i * NumPDEEqns_];

	  coordy = y_coord;
	}

      if (in_z_coord)
	{
	  double* z_coord = (double *) ML_allocate(sizeof(double) * (Nghost+n));

	  for (int i = 0 ; i < n ; ++i)
	    tmp[i * NumPDEEqns_] = in_z_coord[i];

	  ML_exchange_bdry(&tmp[0],AAA->getrow->pre_comm, NumPDEEqns_ * n,
			   AAA->comm, ML_OVERWRITE,NULL);

	  for (int i = 0 ; i < n + Nghost ; ++i)
	    z_coord[i] = tmp[i * NumPDEEqns_];

	  coordz = z_coord;
	}

    } // if (!(in_x_coord == 0 && in_y_coord == 0 && in_z_coord == 0))

  return(0);
 }
Example #13
void ML_get_row_CSR_norow_map(ML_Operator *input_matrix, int N_requested_rows,
        int requested_rows[], int *allocated_space, int **columns,
        double **values, int row_lengths[], int index)
{
   int    i, *mapper, *t1, row;
   ML_Operator *next;
   double *t2;
   struct ML_CSR_MSRdata *matrix;
   int    *rowptr, *bindx, *col_ptr, itemp, j;
   double *val, *val_ptr;


#ifdef DEBUG2
   if (N_requested_rows != 1) {
      printf("ML_get_matrix_row is currently implemented for only 1 row");
      printf(" at a time.\n");
      exit(1);
   }
#endif

   row = requested_rows[0]; 
#ifdef DEBUG2
   if ( (row >= input_matrix->getrow->Nrows) || (row < 0) ) {
      row_lengths[0] = 0;
      return;
   }
#endif

   next = input_matrix->sub_matrix;
   while ( (next != NULL) && (row < next->getrow->Nrows) ) {
      input_matrix = next;
      next = next->sub_matrix;
   }
   if (next != NULL) row -= next->getrow->Nrows;

   matrix = (struct ML_CSR_MSRdata *) input_matrix->data;
   rowptr = matrix->rowptr;
   itemp   = rowptr[row];
   bindx   = &(matrix->columns[itemp]);
   val     = &(matrix->values[itemp]);

   *row_lengths = rowptr[row+1] - itemp;

   if (*row_lengths+index > *allocated_space) {
      *allocated_space = 2*(*allocated_space) + 1;
      if (*row_lengths+index > *allocated_space) 
         *allocated_space = *row_lengths + 5 + index;
      t1 = (int    *) ML_allocate(*allocated_space*sizeof(int   ));
      t2 = (double *) ML_allocate(*allocated_space*sizeof(double));
      if (t2 == NULL) {
         printf("Not enough space to get a matrix row. A row length of \n");
         printf("%d was not sufficient\n",(*allocated_space-1)/2);
	 fflush(stdout);
	 ML_avoid_unused_param( (void *) &N_requested_rows);
         exit(1);
      }
      for (i = 0; i < index; i++) t1[i] = (*columns)[i];
      for (i = 0; i < index; i++) t2[i] = (*values)[i];
      ML_free(*columns);  ML_free(*values);
      *columns = t1;
      *values  = t2;
   }
   col_ptr = &((*columns)[index]);
   val_ptr = &((*values)[index]);

   for (j = 0 ; j < *row_lengths; j++) {
      *col_ptr++ = *bindx++;
   }
   for (j = 0 ; j < *row_lengths; j++) {
      *val_ptr++  = *val++;
   }

   if ( (input_matrix->getrow->use_loc_glob_map == ML_YES)) {
      mapper       = input_matrix->getrow->loc_glob_map;
      for (i = 0; i < row_lengths[0]; i++) 
         (*columns)[i+index] = mapper[(*columns)[index+i]];
   }
}
Example #14
void ML_get_matrix_row(ML_Operator *input_matrix, int N_requested_rows,
        int requested_rows[], int *allocated_space, int **columns,
        double **values, int row_lengths[], int index)
{
   int    i, *mapper, *t1, row;
   ML_Operator *next;
   double *t2;
   void *data;
   int (*getfunction)(void *,int,int*,int,int*,double*,int*);

#ifdef DEBUG2
   if (N_requested_rows != 1) {
      printf("ML_get_matrix_row is currently implemented for only 1 row");
      printf(" at a time.\n");
      exit(1);
   }
#endif

   row = requested_rows[0]; 
#ifdef DEBUG2
   if ( (row >= input_matrix->getrow->Nrows) || (row < 0) ) {
      row_lengths[0] = 0;
      return;
   }
#endif

   if (input_matrix->getrow->row_map != NULL) {
      if (input_matrix->getrow->row_map[row] != -1) 
         row = input_matrix->getrow->row_map[row];
      else { 
	row_lengths[0] = 0; 
	ML_avoid_unused_param( (void *) &N_requested_rows);
	return;}
   }

   next = input_matrix->sub_matrix;

   while ( (next != NULL) && (row < next->getrow->Nrows) ) {
      input_matrix = next;
      next = next->sub_matrix;
   }
   if (next != NULL) row -= next->getrow->Nrows;

   data = (void *) input_matrix;
   getfunction = (int (*)(void *,int,int*,int,int*,double*,int*))
     input_matrix->getrow->func_ptr;

   while(getfunction(data,1,&row,*allocated_space-index,
               &((*columns)[index]), &((*values)[index]), row_lengths) == 0) {
      *allocated_space = 2*(*allocated_space) + 1;
      t1 = (int    *) ML_allocate(*allocated_space*sizeof(int   ));
      if (t1 == NULL) {
            printf("Not enough space to get a matrix row. A row length of \n");
            printf("%d Was not sufficient\n",(*allocated_space-1)/2);
   	    fflush(stdout);
            exit(1);
      }
      else {
         for (i = 0; i < index; i++) t1[i] = (*columns)[i];
         if (*columns != NULL) ML_free(*columns);  
         *columns = t1;
      }

      t2 = (double *) ML_allocate(*allocated_space*sizeof(double));
      if (t2 == NULL) {
            printf("Not enough space to get a matrix row. A row length of \n");
            printf("%d Was not sufficient\n",(*allocated_space-1)/2);
   	    fflush(stdout);
            exit(1);
      }
      for (i = 0; i < index; i++) t2[i] = (*values)[i];
      if (*values  != NULL) ML_free(*values);
      *values  = t2;
   }

   if ( (input_matrix->getrow->use_loc_glob_map == ML_YES)) {
      mapper       = input_matrix->getrow->loc_glob_map;
      for (i = 0; i < row_lengths[0]; i++) 
         (*columns)[i+index] = mapper[(*columns)[index+i]];
   }
}
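/* A minimal calling sketch for ML_get_matrix_row (an assumption, mirroring
   the call pattern used in Example #16 below; 'Amatrix' is just a placeholder
   operator): the caller passes growable buffers plus an 'allocated' counter,
   the routine reallocates them as needed, and the same buffers are reused
   across rows and freed once at the end.

     int allocated = 0, *cols = NULL, row_len, row;
     double *vals = NULL;
     for (row = 0; row < Amatrix->getrow->Nrows; row++) {
        ML_get_matrix_row(Amatrix, 1, &row, &allocated, &cols, &vals,
                          &row_len, 0);
        ... use cols[0..row_len-1] and vals[0..row_len-1] ...
     }
     ML_free(cols); ML_free(vals);
*/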
Example #15
int main(int argc, char *argv[])
{
	int num_PDE_eqns=3, N_levels=3, nsmooth=1;

	int    leng, level, N_grid_pts, coarsest_level;

  /* See Aztec User's Guide for more information on the */
  /* variables that follow.                             */

  int    proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE];
  double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE];

  /* data structure for matrix corresponding to the fine grid */

  int    *data_org = NULL, *update = NULL, *external = NULL;
  int    *update_index = NULL, *extern_index = NULL;
  int    *cpntr = NULL;
  int    *bindx = NULL, N_update, iii;
  double *val = NULL;
	double *xxx, *rhs;

	AZ_MATRIX *Amat;
	AZ_PRECOND *Pmat = NULL;
	ML *ml;
	FILE *fp;
  int ch,i,j, Nrigid, *garbage;
   struct AZ_SCALING *scaling;
double solve_time, setup_time, start_time, *mode, *rigid;
ML_Aggregate *ag;
int  nblocks, *blocks;
char filename[80];
double alpha;
int one = 1;


#ifdef ML_MPI
  MPI_Init(&argc,&argv);

  /* get number of processors and the name of this processor */

  AZ_set_proc_config(proc_config, MPI_COMM_WORLD);
#else
  AZ_set_proc_config(proc_config, AZ_NOT_MPI);
#endif

leng = 0;
if (proc_config[AZ_node] == 0) {
#ifdef binary
	fp=fopen(".data","rb");
#else
	fp=fopen(".data","r");
#endif
	if (fp==NULL)
		{
			printf("couldn't open file .data\n");
			exit(1);
		}
#ifdef binary
        fread(&leng, sizeof(int), 1, fp);
#else
	fscanf(fp,"%d",&leng);
#endif

	fclose(fp);
}
leng = AZ_gsum_int(leng, proc_config);

	N_grid_pts=leng/num_PDE_eqns;



  /* initialize the list of global indices. NOTE: the list of global */
  /* indices must be in ascending order so that subsequent calls to  */
  /* AZ_find_index() will function properly. */
	
  AZ_read_update(&N_update, &update, proc_config, N_grid_pts, num_PDE_eqns,
                 AZ_linear);
	
	
  AZ_read_msr_matrix(update, &val, &bindx, N_update, proc_config);

  AZ_transform(proc_config, &external, bindx, val,  update, &update_index,
	       &extern_index, &data_org, N_update, 0, 0, 0, &cpntr, 
               AZ_MSR_MATRIX);
	
  Amat = AZ_matrix_create( leng );
  AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL);

  Amat->matrix_type  = data_org[AZ_matrix_type];
	
  data_org[AZ_N_rows]  = data_org[AZ_N_internal] + data_org[AZ_N_border];
			
  start_time = AZ_second();

AZ_defaults(options, params);
/*
scaling = AZ_scaling_create();
xxx = (double *) calloc( leng,sizeof(double));
rhs=(double *)calloc(leng,sizeof(double));
options[AZ_scaling] = AZ_sym_diag;
options[AZ_precond] = AZ_none;
options[AZ_max_iter] = 30;
options[AZ_keep_info] = 1;
AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); 
don't forget vector rescaling ...
free(xxx);
free(rhs);
*/
options[AZ_scaling] = AZ_none;
	



  ML_Create(&ml, N_levels);
			
			
  /* set up discretization matrix and matrix vector function */
	
  AZ_ML_Set_Amat(ml, N_levels-1, N_update, N_update, Amat, proc_config);
	
  ML_Aggregate_Create( &ag );

  Nrigid = 0;
if (proc_config[AZ_node] == 0) {
  sprintf(filename,"rigid_body_mode%d",Nrigid+1);
  while( (fp = fopen(filename,"r")) != NULL) {
     fclose(fp);
     Nrigid++;
     sprintf(filename,"rigid_body_mode%d",Nrigid+1);
  }
}
Nrigid = AZ_gsum_int(Nrigid,proc_config);

  if (Nrigid != 0) {
     rigid = (double *) ML_allocate( sizeof(double)*Nrigid*(N_update+1) );
     if (rigid == NULL) {
        printf("Error: Not enough space for rigid body modes\n");
     }
  }

rhs=(double *)malloc(leng*sizeof(double));
AZ_random_vector(rhs, data_org, proc_config);
  
  for (i = 0; i < Nrigid; i++) {
     sprintf(filename,"rigid_body_mode%d",i+1);
     AZ_input_msr_matrix(filename, update, &mode, &garbage, 
                         N_update, proc_config);


/*
AZ_sym_rescale_sl(mode, Amat->data_org, options, proc_config, scaling);
*/
/*
Amat->matvec(mode, rigid, Amat, proc_config);
for (j = 0; j < N_update; j++) printf("this is %d %e\n",j,rigid[j]);
*/
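/* Gram-Schmidt step: project the previously stored modes out of the current
   mode, then (below) remove the mode's component from the rhs so that the
   rhs has no component in the (near) null space. */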
for (j = 0; j < i; j++) {
alpha = -AZ_gdot(N_update, mode, &(rigid[j*N_update]), proc_config)/AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]), proc_config);
daxpy_(&N_update, &alpha,  &(rigid[j*N_update]),  &one, mode, &one);
printf("alpha1 is %e\n",alpha);
}
alpha = -AZ_gdot(N_update, mode, rhs, proc_config)/AZ_gdot(N_update, mode, mode, proc_config);
printf("alpha2 is %e\n",alpha);
daxpy_(&N_update, &alpha,  mode,  &one, rhs, &one);

  
     for (j = 0; j < N_update; j++) rigid[i*N_update+j] = mode[j];
     free(mode);
     free(garbage);
  }
for (j = 0; j < Nrigid; j++) {
alpha = -AZ_gdot(N_update, rhs, &(rigid[j*N_update]), proc_config)/AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]), proc_config);
daxpy_(&N_update, &alpha,  &(rigid[j*N_update]),  &one, rhs, &one);
printf("alpha4 is %e\n",alpha);
}


for (i = 0; i < Nrigid; i++) {
  alpha = -AZ_gdot(N_update, &(rigid[i*N_update]), rhs, proc_config);
  printf("alpha is %e\n",alpha);
}
  if (Nrigid != 0) {
     ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, Nrigid, rigid, N_update);
/*
     free(rigid);
*/
  }

	coarsest_level = ML_Gen_MGHierarchy_UsingAggregation(ml, N_levels-1, ML_DECREASING, ag);
	coarsest_level = N_levels - coarsest_level;
/*
ML_Operator_Print(&(ml->Pmat[N_levels-2]), "Pmat");
exit(1);
*/

	if ( proc_config[AZ_node] == 0 )
		printf("Coarse level = %d \n", coarsest_level);
	
	/* set up smoothers */
	
	for (level = N_levels-1; level > coarsest_level; level--) {
j = 10;   /* number of Aztec iterations per smoother application */
options[AZ_solver] = AZ_cg;
options[AZ_precond]=AZ_sym_GS; options[AZ_subdomain_solve]=AZ_icc;
/*
options[AZ_precond] = AZ_none;
*/
options[AZ_poly_ord] = 5;
ML_Gen_SmootherAztec(ml, level, options, params, proc_config, status,
j, ML_PRESMOOTHER,NULL);
ML_Gen_SmootherAztec(ml, level, options, params, proc_config, status,
j, ML_POSTSMOOTHER,NULL);
/*
		ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth,1.0);
		ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth,1.0);
*/
/*
                nblocks = ML_Aggregate_Get_AggrCount( ag, level );
                ML_Aggregate_Get_AggrMap( ag, level, &blocks);
                ML_Gen_Smoother_VBlockSymGaussSeidel( ml , level, ML_BOTH, nsmooth, 1.0,
                                                 nblocks, blocks);
                ML_Gen_Smoother_VBlockSymGaussSeidel( ml , level, ML_POSTSMOOTHER, nsmooth, 1.0, 
                                                 nblocks, blocks);
*/
/*
                ML_Gen_Smoother_VBlockJacobi( ml , level, ML_PRESMOOTHER, nsmooth, .5,
                                                 nblocks, blocks);
                ML_Gen_Smoother_VBlockJacobi( ml , level, ML_POSTSMOOTHER, nsmooth,.5,
                                                 nblocks, blocks);
*/
/*
		ML_Gen_Smoother_GaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth);
		ML_Gen_Smoother_GaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth);    
*/
/* 
need to change this when num_pdes is different on different levels
*/
/*
if (level == N_levels-1) {
		ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth, 0.5, num_PDE_eqns);
		ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth, 0.5, num_PDE_eqns);
}
else {
		ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth, 0.5, 2*num_PDE_eqns);
		ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth, 0.5, 2*num_PDE_eqns);
}
*/
/*
*/

/*
			ML_Gen_SmootherJacobi(ml , level, ML_PRESMOOTHER, nsmooth, .67);
			ML_Gen_SmootherJacobi(ml , level, ML_POSTSMOOTHER, nsmooth, .67 );
*/
		
		
	}
	
/*
	ML_Gen_CoarseSolverSuperLU( ml, coarsest_level);
*/
/*
ML_Gen_SmootherSymGaussSeidel(ml , coarsest_level, ML_PRESMOOTHER, 2*nsmooth,1.);
*/
/*
ML_Gen_SmootherBlockGaussSeidel(ml , level, ML_PRESMOOTHER, 50*nsmooth, 1.0, 2*num_PDE_eqns);
*/
ML_Gen_Smoother_BlockGaussSeidel(ml, coarsest_level, ML_PRESMOOTHER, 2*nsmooth, 1.0, num_PDE_eqns); /* coarse-grid smoother; level == coarsest_level after the loop above */
		
	
	ML_Gen_Solver(ml, ML_MGV, N_levels-1, coarsest_level); 
	AZ_defaults(options, params);
	
        options[AZ_solver]   = AZ_GMRESR;
        options[AZ_scaling]  = AZ_none;
        options[AZ_precond]  = AZ_user_precond;
        options[AZ_conv]     = AZ_rhs;
        options[AZ_output]   = 1;
        options[AZ_max_iter] = 1500;
        options[AZ_poly_ord] = 5;
        options[AZ_kspace]   = 130;
        params[AZ_tol]       = 1.0e-8;
	
	AZ_set_ML_preconditioner(&Pmat, Amat, ml, options); 
setup_time = AZ_second() - start_time;
	
	xxx = (double *) malloc( leng*sizeof(double));

	
        /* Set rhs */
 
        fp = fopen("AZ_capture_rhs.dat","r");
        if (fp == NULL) {
           if (proc_config[AZ_node] == 0) printf("taking random vector for rhs\n");
/*
           AZ_random_vector(rhs, data_org, proc_config);
           AZ_reorder_vec(rhs, data_org, update_index, NULL);
           AZ_random_vector(xxx, data_org, proc_config);
           AZ_reorder_vec(xxx, data_org, update_index, NULL);
           Amat->matvec(xxx, rhs, Amat, proc_config);
*/
        }
        else {
           ch = getc(fp);
           if (ch == 'S') {
              while ( (ch = getc(fp)) != '\n') ;
           }
           else ungetc(ch,fp);
           for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) 
              fscanf(fp,"%lf",&(rhs[i]));
           fclose(fp);
        }
	for (iii = 0; iii < leng; iii++) xxx[iii] = 0.0; 

        /* Set x */

        fp = fopen("AZ_capture_init_guess.dat","r");
        if (fp != NULL) {
           ch = getc(fp);
           if (ch == 'S') {
              while ( (ch = getc(fp)) != '\n') ;
           }
           else ungetc(ch,fp);
           for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++)
              fscanf(fp,"%lf",&(xxx[i]));
           fclose(fp);
           options[AZ_conv] = AZ_expected_values;
        }

        /* if Dirichlet BC ... put the answer in */

        for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) {
           if ( (val[i] > .99999999) && (val[i] < 1.0000001))
              xxx[i] = rhs[i];      
        }

        fp = fopen("AZ_no_multilevel.dat","r");
        scaling = AZ_scaling_create();
start_time = AZ_second();
        if (fp != NULL) {
           fclose(fp);
           options[AZ_precond] = AZ_none;
           options[AZ_scaling] = AZ_sym_diag;
           options[AZ_ignore_scaling] = AZ_TRUE;

           options[AZ_keep_info] = 1;
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); 

/*
           options[AZ_pre_calc] = AZ_reuse;
           options[AZ_conv] = AZ_expected_values;
           if (proc_config[AZ_node] == 0) 
              printf("\n-------- Second solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); 
           if (proc_config[AZ_node] == 0) 
              printf("\n-------- Third solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); 
*/
        }
        else {
           options[AZ_keep_info] = 1;
/*
options[AZ_max_iter] = 40;
*/
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); 
for (j = 0; j < Nrigid; j++) {
alpha = -AZ_gdot(N_update, xxx, &(rigid[j*N_update]), proc_config)/AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]), proc_config);
daxpy_(&N_update, &alpha,  &(rigid[j*N_update]),  &one, xxx, &one);
printf("alpha5 is %e\n",alpha);
}
AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); 
           options[AZ_pre_calc] = AZ_reuse;
           options[AZ_conv] = AZ_expected_values;
/*
           if (proc_config[AZ_node] == 0) 
              printf("\n-------- Second solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); 
           if (proc_config[AZ_node] == 0) 
              printf("\n-------- Third solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); 
*/
        }
   solve_time = AZ_second() - start_time;

   if (proc_config[AZ_node] == 0) 
      printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time);

   ML_Aggregate_Destroy(&ag);
   ML_Destroy(&ml);
   AZ_free((void *) Amat->data_org);
   AZ_free((void *) Amat->val);
   AZ_free((void *) Amat->bindx);
   AZ_free((void *) update);
   AZ_free((void *) external);
   AZ_free((void *) extern_index);
   AZ_free((void *) update_index);
   if (Amat  != NULL) AZ_matrix_destroy(&Amat);
   if (Pmat  != NULL) AZ_precond_destroy(&Pmat);
   free(xxx);
   free(rhs);


#ifdef ML_MPI
  MPI_Finalize();
#endif
	
  return 0;
	
}
Example #16
int main(int argc, char *argv[])
{
	int num_PDE_eqns=6, N_levels=4, nsmooth=2;

	int    leng, level, N_grid_pts, coarsest_level;

  /* See Aztec User's Guide for more information on the */
  /* variables that follow.                             */

  int    proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE];
  double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE];

  /* data structure for matrix corresponding to the fine grid */

  double *val = NULL, *xxx, *rhs, solve_time, setup_time, start_time;
  AZ_MATRIX *Amat;
  AZ_PRECOND *Pmat = NULL;
  ML *ml;
  FILE *fp;
  int i, j, Nrigid, *garbage = NULL;
  /* In the original source of this example the following Aztec arrays and
     counters are file-scope globals; they are declared here (an assumption)
     so that the listing is self-contained. */
  int    *data_org = NULL, *update = NULL, *external = NULL;
  int    *update_index = NULL, *extern_index = NULL, *cpntr = NULL;
  int    *bindx = NULL, N_update, iii;
  int    which_filename = 0;
#ifdef SCALE_ME
  double *scaling_vect = NULL;
#endif
#ifdef ML_partition
  int nblocks;
  int *block_list = NULL;
  int k;
#endif
  struct AZ_SCALING *scaling;
  ML_Aggregate *ag;
double *mode, *rigid;
char filename[80];
double alpha;
int allocated = 0;
int old_prec, old_sol;
double old_tol;
/*
double *Amode, beta, biggest;
int big_ind = -1, ii;
*/
ML_Operator *Amatrix;
int *rowi_col = NULL, rowi_N, count2, ccc;
double *rowi_val = NULL;
double max_diag, min_diag, max_sum, sum;
 int nBlocks, *blockIndices, Ndof;
#ifdef ML_partition
   FILE *fp2;
   int count;

   if (argc != 2) {
     printf("Usage: ml_read_elas num_processors\n");
     exit(1);
   }
   else sscanf(argv[1],"%d",&nblocks);
#endif

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  /* get number of processors and the name of this processor */

  AZ_set_proc_config(proc_config, MPI_COMM_WORLD);
#else
  AZ_set_proc_config(proc_config, AZ_NOT_MPI);
#endif

  /* read in the number of matrix equations */
  leng = 0;
  if (proc_config[AZ_node] == 0) {
#    ifdef binary
	fp=fopen(".data","rb");
#    else
	fp=fopen(".data","r");
#    endif
     if (fp==NULL) {
        printf("couldn't open file .data\n");
        exit(1);
     }
#    ifdef binary
        fread(&leng, sizeof(int), 1, fp);
#    else
        fscanf(fp,"%d",&leng);
#    endif
     fclose(fp);
  }
  leng = AZ_gsum_int(leng, proc_config);

  N_grid_pts=leng/num_PDE_eqns;

  /* initialize the list of global indices. NOTE: the list of global */
  /* indices must be in ascending order so that subsequent calls to  */
  /* AZ_find_index() will function properly. */

  if (proc_config[AZ_N_procs] == 1) i = AZ_linear;
  else i = AZ_file;
  AZ_read_update(&N_update, &update, proc_config, N_grid_pts, num_PDE_eqns,i);

  AZ_read_msr_matrix(update, &val, &bindx, N_update, proc_config);


  /* This code is to fix things up so that we are sure we have all */
  /* blocks (including the ghost nodes) the same size.             */

  AZ_block_MSR(&bindx, &val, N_update, num_PDE_eqns, update);

  AZ_transform_norowreordering(proc_config, &external, bindx, val,  update, &update_index,
	       &extern_index, &data_org, N_update, 0, 0, 0, &cpntr,
	       AZ_MSR_MATRIX);

  Amat = AZ_matrix_create( leng );
  AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL);

  Amat->matrix_type  = data_org[AZ_matrix_type];

  data_org[AZ_N_rows]  = data_org[AZ_N_internal] + data_org[AZ_N_border];

#ifdef SCALE_ME
  ML_MSR_sym_diagonal_scaling(Amat, proc_config, &scaling_vect);
#endif

  start_time = AZ_second();

  options[AZ_scaling] = AZ_none;
  ML_Create(&ml, N_levels);
  ML_Set_PrintLevel(10);


  /* set up discretization matrix and matrix vector function */

  AZ_ML_Set_Amat(ml, N_levels-1, N_update, N_update, Amat, proc_config);

#ifdef ML_partition

  /* this code is meant to partition the matrices so that things can be */
  /* run in parallel later.                                             */
  /* It is meant to be run on only one processor.                       */
#ifdef	MB_MODIF
  fp2 = fopen(".update","w");
#else
  fp2 = fopen("partition_file","w");
#endif

  ML_Operator_AmalgamateAndDropWeak(&(ml->Amat[N_levels-1]), num_PDE_eqns, 0.0);
  ML_Gen_Blocks_Metis(ml, N_levels-1, &nblocks, &block_list);

  for (i = 0; i < nblocks; i++) {
     count = 0;
     for (j = 0; j < ml->Amat[N_levels-1].outvec_leng; j++) {
        if (block_list[j] == i) count++;
     }
     fprintf(fp2,"   %d\n",count*num_PDE_eqns);
     for (j = 0; j < ml->Amat[N_levels-1].outvec_leng; j++) {
        if (block_list[j] == i) {
           for (k = 0; k < num_PDE_eqns; k++)  fprintf(fp2,"%d\n",j*num_PDE_eqns+k);
        }
     }
  }
  fclose(fp2);
  ML_Operator_UnAmalgamateAndDropWeak(&(ml->Amat[N_levels-1]),num_PDE_eqns,0.0);
#ifdef	MB_MODIF
  printf(" partition file dumped in .update\n");
#endif
  exit(1);
#endif

  ML_Aggregate_Create( &ag );
/*
  ML_Aggregate_Set_CoarsenScheme_MIS(ag);
*/
#ifdef MB_MODIF
  ML_Aggregate_Set_DampingFactor(ag,1.50);
#else
  ML_Aggregate_Set_DampingFactor(ag,1.5);
#endif
  ML_Aggregate_Set_CoarsenScheme_METIS(ag);
  ML_Aggregate_Set_NodesPerAggr( ml, ag, -1, 35);
  /*
  ML_Aggregate_Set_Phase3AggregateCreationAggressiveness(ag, 10.001);
  */


  ML_Aggregate_Set_Threshold(ag, 0.0);
  ML_Aggregate_Set_MaxCoarseSize( ag, 300);


  /* read in the rigid body modes */

   Nrigid = 0;

  /* to ensure compatibility with RBM dumping software */
   if (proc_config[AZ_node] == 0) {

      sprintf(filename,"rigid_body_mode%02d",Nrigid+1);
      while( (fp = fopen(filename,"r")) != NULL) {
	which_filename = 1;
          fclose(fp);
          Nrigid++;
          sprintf(filename,"rigid_body_mode%02d",Nrigid+1);
      }
      sprintf(filename,"rigid_body_mode%d",Nrigid+1);
      while( (fp = fopen(filename,"r")) != NULL) {
          fclose(fp);
          Nrigid++;
          sprintf(filename,"rigid_body_mode%d",Nrigid+1);
      }
    }

    Nrigid = AZ_gsum_int(Nrigid,proc_config);

    if (Nrigid != 0) {
       rigid = (double *) ML_allocate( sizeof(double)*Nrigid*(N_update+1) );
       if (rigid == NULL) {
          printf("Error: Not enough space for rigid body modes\n");
       }
    }

    rhs   = (double *) malloc(leng*sizeof(double));
    xxx   = (double *) malloc(leng*sizeof(double));

    for (iii = 0; iii < leng; iii++) xxx[iii] = 0.0;



    for (i = 0; i < Nrigid; i++) {
       if (which_filename == 1) sprintf(filename,"rigid_body_mode%02d",i+1);
       else sprintf(filename,"rigid_body_mode%d",i+1);
       AZ_input_msr_matrix(filename,update,&mode,&garbage,N_update,proc_config);
       AZ_reorder_vec(mode, data_org, update_index, NULL);
       /* here is something to stick a rigid body mode as the initial */
       /* The idea is to solve A x = 0 without smoothing with a two   */
       /* level method. If everything is done properly, we should     */
       /* converge in 2 iterations.                                   */
       /* Note: we must also zero out components of the rigid body    */
       /* mode that correspond to Dirichlet bcs.                      */

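       /* The "-4" test below is never true (i runs from 0 to Nrigid-1), so
          this debugging branch is disabled; change the constant to a valid
          mode index to activate it. */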
       if (i == -4) {
          for (iii = 0; iii < leng; iii++) xxx[iii] = mode[iii];

          ccc = 0;
          Amatrix = &(ml->Amat[N_levels-1]);
          for (iii = 0; iii < Amatrix->outvec_leng; iii++) {
             ML_get_matrix_row(Amatrix,1,&iii,&allocated,&rowi_col,&rowi_val,
                               &rowi_N, 0);
             count2 = 0;
             for (j = 0; j < rowi_N; j++) if (rowi_val[j] != 0.) count2++;
             if (count2 <= 1) { xxx[iii] = 0.; ccc++; }
          }
          free(rowi_col); free(rowi_val);
          allocated = 0; rowi_col = NULL; rowi_val = NULL;
       }

       /*
        *  Rescale matrix/rigid body modes and checking
        *
        AZ_sym_rescale_sl(mode, Amat->data_org, options, proc_config, scaling);
        Amat->matvec(mode, rigid, Amat, proc_config);
        for (j = 0; j < N_update; j++) printf("this is %d %e\n",j,rigid[j]);
        */

        /* Here is some code to check that the rigid body modes are  */
        /* really rigid body modes. The idea is to multiply by A and */
        /* then to zero out things that we "think" are boundaries.   */
        /* In this hardwired example, things near boundaries         */
        /* correspond to matrix rows that do not have 81 nonzeros.   */
        /*

        Amode = (double *) malloc(leng*sizeof(double));
        Amat->matvec(mode, Amode, Amat, proc_config);
        j = 0;
        biggest = 0.0;
        for (ii = 0; ii < N_update; ii++) {
           if ( Amat->bindx[ii+1] - Amat->bindx[ii] != 80) {
              Amode[ii] = 0.; j++;
           }
           else {
              if ( fabs(Amode[ii]) > biggest) {
                 biggest=fabs(Amode[ii]); big_ind = ii;
              }
           }
        }
        printf("%d entries zeroed out of %d elements\n",j,N_update);
        alpha = AZ_gdot(N_update, Amode, Amode, proc_config);
        beta  = AZ_gdot(N_update,  mode,  mode, proc_config);
        printf("||A r||^2 =%e, ||r||^2 = %e, ratio = %e\n",
               alpha,beta,alpha/beta);
        printf("the biggest is %e at row %d\n",biggest,big_ind);
        free(Amode);

        */

        /* orthogonalize mode with respect to previous modes. */

        for (j = 0; j < i; j++) {
           alpha = -AZ_gdot(N_update, mode, &(rigid[j*N_update]), proc_config)/
                    AZ_gdot(N_update, &(rigid[j*N_update]),
                               &(rigid[j*N_update]), proc_config);
	   /*           daxpy_(&N_update,&alpha,&(rigid[j*N_update]),  &one, mode, &one); */
        }
#ifndef	MB_MODIF
       printf(" after mb %e %e %e\n",mode[0],mode[1],mode[2]);
#endif

        for (j = 0; j < N_update; j++) rigid[i*N_update+j] = mode[j];
        free(mode);
        free(garbage); garbage = NULL;

    }

    if (Nrigid != 0) {
             ML_Aggregate_Set_BlockDiagScaling(ag);
       ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, Nrigid, rigid, N_update);
       free(rigid);
    }
#ifdef SCALE_ME
    ML_Aggregate_Scale_NullSpace(ag, scaling_vect, N_update);
#endif

    coarsest_level = ML_Gen_MGHierarchy_UsingAggregation(ml, N_levels-1,
				ML_DECREASING, ag);
   AZ_defaults(options, params);
   coarsest_level = N_levels - coarsest_level;
   if ( proc_config[AZ_node] == 0 )
	printf("Coarse level = %d \n", coarsest_level);

   /* set up smoothers */

   for (level = N_levels-1; level > coarsest_level; level--) {

/*
      ML_Gen_Smoother_BlockGaussSeidel(ml, level,ML_BOTH, 1, 1., num_PDE_eqns);
*/

    /*  Sparse approximate inverse smoother that actually does both */
    /*  pre and post smoothing.                                     */
    /*
      ML_Gen_Smoother_ParaSails(ml , level, ML_PRESMOOTHER, nsmooth,
                                parasails_sym, parasails_thresh,
                                parasails_nlevels, parasails_filter,
                                parasails_loadbal, parasails_factorized);
     */

     /* This is the symmetric Gauss-Seidel smoothing that we usually use. */
     /* In parallel, it is not a true Gauss-Seidel in that each processor */
     /* does a Gauss-Seidel on its local submatrix independent of the     */
     /* other processors.                                                 */

     /* ML_Gen_Smoother_Cheby(ml, level, ML_BOTH, 30., nsmooth); */
     Ndof = ml->Amat[level].invec_leng;

     ML_Gen_Blocks_Aggregates(ag, level, &nBlocks, &blockIndices);

     ML_Gen_Smoother_BlockDiagScaledCheby(ml, level, ML_BOTH, 30.,nsmooth,
					  nBlocks, blockIndices);

     /*
      ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_BOTH, nsmooth,1.);
     */


      /* This is a true Gauss-Seidel in parallel. This seems to work for  */
      /* elasticity problems.  However, I don't believe that this is very */
      /* efficient in parallel.                                           */
     /*
      nblocks = ml->Amat[level].invec_leng/num_PDE_eqns;
      blocks = (int *) ML_allocate(sizeof(int)*N_update);
      for (i =0; i < ml->Amat[level].invec_leng; i++)
         blocks[i] = i/num_PDE_eqns;

      ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml , level, ML_PRESMOOTHER,
                                                  nsmooth, 1., nblocks, blocks);
      ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml, level, ML_POSTSMOOTHER,
                                                  nsmooth, 1., nblocks, blocks);
      free(blocks);
*/

      /* Block Jacobi Smoothing */
      /*
      nblocks = ml->Amat[level].invec_leng/num_PDE_eqns;
      blocks = (int *) ML_allocate(sizeof(int)*N_update);
      for (i =0; i < ml->Amat[level].invec_leng; i++)
         blocks[i] = i/num_PDE_eqns;

      ML_Gen_Smoother_VBlockJacobi(ml , level, ML_BOTH, nsmooth,
                                   ML_ONE_STEP_CG, nblocks, blocks);
      free(blocks);
      */

      /* Jacobi Smoothing                                                 */
     /*

      ML_Gen_Smoother_Jacobi(ml , level, ML_PRESMOOTHER, nsmooth, ML_ONE_STEP_CG);
      ML_Gen_Smoother_Jacobi(ml , level, ML_POSTSMOOTHER, nsmooth,ML_ONE_STEP_CG);
     */



      /*  This does a block Gauss-Seidel (not true GS in parallel)        */
      /*  where each processor has 'nblocks' blocks.                      */
      /*
      nblocks = 250;
      ML_Gen_Blocks_Metis(ml, level, &nblocks, &blocks);
      ML_Gen_Smoother_VBlockJacobi(ml , level, ML_BOTH, nsmooth,ML_ONE_STEP_CG,
                                        nblocks, blocks);
      free(blocks);
      */
      num_PDE_eqns = 6;
   }
   /* Choose coarse grid solver: mls, superlu, symGS, or Aztec */

   /*
   ML_Gen_Smoother_Cheby(ml, coarsest_level, ML_BOTH, 30., nsmooth);
   ML_Gen_CoarseSolverSuperLU( ml, coarsest_level);
   */
   /*
   ML_Gen_Smoother_SymGaussSeidel(ml , coarsest_level, ML_BOTH, nsmooth,1.);
   */

   old_prec = options[AZ_precond];
   old_sol  = options[AZ_solver];
   old_tol  = params[AZ_tol];
   params[AZ_tol] = 1.0e-9;
   params[AZ_tol] = 1.0e-5;
   options[AZ_precond] = AZ_Jacobi;
   options[AZ_solver]  = AZ_cg;
   options[AZ_poly_ord] = 1;
   options[AZ_conv] = AZ_r0;
   options[AZ_orth_kvecs] = AZ_TRUE;

   j = AZ_gsum_int(ml->Amat[coarsest_level].outvec_leng, proc_config);

   options[AZ_keep_kvecs] = j - 6;
   options[AZ_max_iter] =  options[AZ_keep_kvecs];

   ML_Gen_SmootherAztec(ml, coarsest_level, options, params,
            proc_config, status, options[AZ_keep_kvecs], ML_PRESMOOTHER, NULL);

   options[AZ_conv] = AZ_noscaled;
   options[AZ_keep_kvecs] = 0;
   options[AZ_orth_kvecs] = 0;
   options[AZ_precond] = old_prec;
   options[AZ_solver] = old_sol;
   params[AZ_tol] = old_tol;

   /*   */


#ifdef RST_MODIF
   ML_Gen_Solver(ml, ML_MGV, N_levels-1, coarsest_level);
#else
#ifdef	MB_MODIF
   ML_Gen_Solver(ml, ML_SAAMG,   N_levels-1, coarsest_level);
#else
   ML_Gen_Solver(ml, ML_MGFULLV, N_levels-1, coarsest_level);
#endif
#endif

   options[AZ_solver]   = AZ_GMRESR;
         options[AZ_solver]   = AZ_cg;
   options[AZ_scaling]  = AZ_none;
   options[AZ_precond]  = AZ_user_precond;
   options[AZ_conv]     = AZ_r0;
   options[AZ_conv] = AZ_noscaled;
   options[AZ_output]   = 1;
   options[AZ_max_iter] = 500;
   options[AZ_poly_ord] = 5;
   options[AZ_kspace]   = 40;
   params[AZ_tol]       = 4.8e-6;

   AZ_set_ML_preconditioner(&Pmat, Amat, ml, options);
   setup_time = AZ_second() - start_time;

   /* Set rhs */

   fp = fopen("AZ_capture_rhs.dat","r");
   if (fp == NULL) {
      AZ_random_vector(rhs, data_org, proc_config);
      if (proc_config[AZ_node] == 0) printf("taking random vector for rhs\n");
      for (i = 0; i < -N_update; i++) {
        rhs[i] = (double) update[i]; rhs[i] = 7.;
      }
   }
   else {
      if (proc_config[AZ_node]== 0) printf("reading rhs guess from file\n");
      AZ_input_msr_matrix("AZ_capture_rhs.dat", update, &rhs, &garbage,
			  N_update, proc_config);
      free(garbage);
   }
   AZ_reorder_vec(rhs, data_org, update_index, NULL);

   printf("changing rhs by multiplying with A\n");
  Amat->matvec(rhs, xxx, Amat, proc_config);
  for (i = 0; i < N_update; i++) rhs[i] = xxx[i];

   fp = fopen("AZ_capture_init_guess.dat","r");
   if (fp != NULL) {
      fclose(fp);
      if (proc_config[AZ_node]== 0) printf("reading initial guess from file\n");
      AZ_input_msr_matrix("AZ_capture_init_guess.dat", update, &xxx, &garbage,
      			  N_update, proc_config);
      free(garbage);


      xxx = (double *) realloc(xxx, sizeof(double)*(
					 Amat->data_org[AZ_N_internal]+
					 Amat->data_org[AZ_N_border] +
					 Amat->data_org[AZ_N_external]));
   }
   AZ_reorder_vec(xxx, data_org, update_index, NULL);

   /* if Dirichlet BC ... put the answer in */

/*
   for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) {
      if ( (val[i] > .99999999) && (val[i] < 1.0000001))
         xxx[i] = rhs[i];
   }
*/

   fp = fopen("AZ_no_multilevel.dat","r");
   scaling = AZ_scaling_create();
   start_time = AZ_second();


   if (fp != NULL) {
      fclose(fp);
      options[AZ_precond] = AZ_none;
      options[AZ_scaling] = AZ_sym_diag;
      options[AZ_ignore_scaling] = AZ_TRUE;

      options[AZ_keep_info] = 1;
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling);

/*
      options[AZ_pre_calc] = AZ_reuse;
      options[AZ_conv] = AZ_expected_values;
      if (proc_config[AZ_node] == 0)
              printf("\n-------- Second solve with improved convergence test -----\n");
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling);
      if (proc_config[AZ_node] == 0)
              printf("\n-------- Third solve with improved convergence test -----\n");
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling);
*/
   }
   else {
      options[AZ_keep_info] = 1;
      options[AZ_conv] = AZ_noscaled;
      options[AZ_conv] = AZ_r0;
      params[AZ_tol] = 1.0e-7;
      /* ML_Iterate(ml, xxx, rhs); */
alpha = sqrt(AZ_gdot(N_update, xxx, xxx, proc_config));
printf("init guess = %e\n",alpha);
alpha = sqrt(AZ_gdot(N_update, rhs, rhs, proc_config));
printf("rhs = %e\n",alpha);
#ifdef SCALE_ME
	ML_MSR_scalerhs(rhs, scaling_vect, data_org[AZ_N_internal] +
                    data_org[AZ_N_border]);
	ML_MSR_scalesol(xxx, scaling_vect, data_org[AZ_N_internal] +
			data_org[AZ_N_border]);
#endif

max_diag = 0.;
min_diag = 1.e30;
max_sum  = 0.;
for (i = 0; i < N_update; i++) {
   if (Amat->val[i] < 0.) printf("woops negative diagonal A(%d,%d) = %e\n",
				 i,i,Amat->val[i]);
   if (Amat->val[i] > max_diag) max_diag = Amat->val[i];
   if (Amat->val[i] < min_diag) min_diag = Amat->val[i];
   sum = fabs(Amat->val[i]);
   for (j = Amat->bindx[i]; j < Amat->bindx[i+1]; j++) {
      sum += fabs(Amat->val[j]);
   }
   if (sum > max_sum) max_sum = sum;
}
printf("Largest diagonal = %e, min diag = %e large abs row sum = %e\n",
max_diag, min_diag, max_sum);

      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);

      options[AZ_pre_calc] = AZ_reuse;
      options[AZ_conv] = AZ_expected_values;
/*
      if (proc_config[AZ_node] == 0)
              printf("\n-------- Second solve with improved convergence test -----\n");
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);
      if (proc_config[AZ_node] == 0)
              printf("\n-------- Third solve with improved convergence test -----\n");
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);
*/
   }
   solve_time = AZ_second() - start_time;

   if (proc_config[AZ_node] == 0)
      printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time);
   if (proc_config[AZ_node] == 0)
     printf("Printing out a few entries of the solution ...\n");

   for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++)
     if (update[j] == 7) {printf("solution(gid = %d) = %10.4e\n",
			      update[j],xxx[update_index[j]]); fflush(stdout);}
   j = AZ_gsum_int(7, proc_config); /* sync processors */
   for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++)
     if (update[j] == 23) {printf("solution(gid = %d) = %10.4e\n",
			      update[j],xxx[update_index[j]]); fflush(stdout);}
   j = AZ_gsum_int(7, proc_config); /* sync processors */
   for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++)
     if (update[j] == 47) {printf("solution(gid = %d) = %10.4e\n",
			      update[j],xxx[update_index[j]]); fflush(stdout);}
   j = AZ_gsum_int(7, proc_config); /* sync processors */
   for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++)
     if (update[j] == 101) {printf("solution(gid = %d) = %10.4e\n",
			      update[j],xxx[update_index[j]]); fflush(stdout);}
   j = AZ_gsum_int(7, proc_config); /* sync processors */
   for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++)
     if (update[j] == 171) {printf("solution(gid = %d) = %10.4e\n",
			      update[j],xxx[update_index[j]]); fflush(stdout);}


   ML_Aggregate_Destroy(&ag);
   ML_Destroy(&ml);
   AZ_free((void *) Amat->data_org);
   AZ_free((void *) Amat->val);
   AZ_free((void *) Amat->bindx);
   AZ_free((void *) update);
   AZ_free((void *) external);
   AZ_free((void *) extern_index);
   AZ_free((void *) update_index);
   AZ_scaling_destroy(&scaling);
   if (Amat  != NULL) AZ_matrix_destroy(&Amat);
   if (Pmat  != NULL) AZ_precond_destroy(&Pmat);
   free(xxx);
   free(rhs);


#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return 0;

}
// ====================================================================== 
int ML_Operator_Add2(ML_Operator *A, ML_Operator *B, ML_Operator *C,
		    int matrix_type, double scalarA, double scalarB)
{
  int A_allocated = 0, *A_bindx = NULL, B_allocated = 0, *B_bindx = NULL;
  double *A_val = NULL, *B_val = NULL, *hashed_vals;
  int i, A_length, B_length, *hashed_inds;
  int max_nz_per_row = 0, min_nz_per_row=1e6, j;
  int hash_val, index_length;
  int *columns, *rowptr, nz_ptr, hash_used, global_col;
  double *values;
  struct ML_CSR_MSRdata *temp;
  int *A_gids, *B_gids;
  int max_per_proc;
#ifdef ML_WITH_EPETRA
  int count;
#endif

  if (A->getrow == NULL) 
    pr_error("ML_Operator_Add: A does not have a getrow function.\n");

  if (B->getrow == NULL) 
    pr_error("ML_Operator_Add: B does not have a getrow function.\n");

  if (A->getrow->Nrows != B->getrow->Nrows) {
    printf("ML_Operator_Add: Can not add, two matrices do not have the same");
    printf(" number of rows %d vs %d",A->getrow->Nrows,B->getrow->Nrows);
    exit(1);
  }

  if (A->invec_leng != B->invec_leng) {
    printf("ML_Operator_Add: Can not add, two matrices do not have the same");
    printf(" number of columns %d vs %d",A->getrow->Nrows,B->getrow->Nrows);
    exit(1);
  }

  /* let's just count some things */
  index_length = A->invec_leng + 1;
  if (A->getrow->pre_comm != NULL) {
    ML_CommInfoOP_Compute_TotalRcvLength(A->getrow->pre_comm);
    index_length += A->getrow->pre_comm->total_rcv_length;
  }
  if (B->getrow->pre_comm != NULL) {
    ML_CommInfoOP_Compute_TotalRcvLength(B->getrow->pre_comm);
    index_length += B->getrow->pre_comm->total_rcv_length;
  }

  ML_create_unique_col_id(A->invec_leng, &A_gids, A->getrow->pre_comm,
			  &max_per_proc,A->comm);
  ML_create_unique_col_id(B->invec_leng, &B_gids, B->getrow->pre_comm,
			  &max_per_proc,B->comm);


  hashed_inds = (int *) ML_allocate(sizeof(int)*index_length);
  hashed_vals = (double *) ML_allocate(sizeof(double)*index_length);

  for (i = 0; i < index_length; i++) hashed_inds[i] = -1;
  for (i = 0; i < index_length; i++) hashed_vals[i] = 0.;

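  /* First pass: only count the nonzeros of scalarA*A + scalarB*B per row
     (the union of the two sparsity patterns, detected via the hash table),
     so that the CSR arrays allocated below can be sized exactly. */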
  nz_ptr = 0;
  for (i = 0 ; i < A->getrow->Nrows; i++) {
    hash_used = 0;
      ML_get_matrix_row(A, 1, &i, &A_allocated, &A_bindx, &A_val,
                        &A_length, 0);
      for (j = 0; j < A_length; j++) {
	global_col = A_gids[A_bindx[j]];
	ML_hash_it(global_col, hashed_inds, index_length,&hash_used,&hash_val);
        hashed_inds[hash_val] = global_col;
        hashed_vals[hash_val] += scalarA * A_val[j];
	A_bindx[j] = hash_val;
      }

      ML_get_matrix_row(B, 1, &i, &B_allocated, &B_bindx, &B_val,
                        &B_length, 0);
      for (j = 0; j < B_length; j++) {
	global_col = B_gids[B_bindx[j]];
	ML_hash_it(global_col, hashed_inds, index_length,&hash_used, &hash_val);
        hashed_inds[hash_val] = global_col;
        hashed_vals[hash_val] += scalarB*B_val[j];
        B_bindx[j] = hash_val;
      }

      for (j = 0; j < A_length; j++) {
        nz_ptr++;
	hashed_inds[A_bindx[j]] = -1;
	hashed_vals[A_bindx[j]] = 0.;
      }
      for (j = 0; j < B_length; j++) {
        if (hashed_inds[B_bindx[j]] != -1) {
	  nz_ptr++;
	  hashed_inds[B_bindx[j]] = -1;
	  hashed_vals[B_bindx[j]] = 0.;
	}
      }
  }
  nz_ptr++;

  columns = 0;
  values = 0;

  rowptr = (int    *) ML_allocate(sizeof(int)*(A->outvec_leng+1));
  if (matrix_type == ML_CSR_MATRIX) {
    columns= (int    *) ML_allocate(sizeof(int)*nz_ptr);
    values = (double *) ML_allocate(sizeof(double)*nz_ptr);
  }
#ifdef ML_WITH_EPETRA
  else if (matrix_type == ML_EpetraCRS_MATRIX) {
    columns= (int    *) ML_allocate(sizeof(int)*(index_length+1));
    values = (double *) ML_allocate(sizeof(double)*(index_length+1));
  }
#endif
  else {
    pr_error("ML_Operator_Add: Unknown matrix type\n");
  }

  nz_ptr = 0;
  rowptr[0] = 0;
  for (i = 0 ; i < A->getrow->Nrows; i++) {
    hash_used = 0;
      ML_get_matrix_row(A, 1, &i, &A_allocated, &A_bindx, &A_val,
                        &A_length, 0);
      for (j = 0; j < A_length; j++) {
	global_col = A_gids[A_bindx[j]];
	ML_hash_it(global_col, hashed_inds, index_length,&hash_used, &hash_val);
        hashed_inds[hash_val] = global_col;
        hashed_vals[hash_val] += scalarA * A_val[j];
	A_bindx[j] = hash_val;
      }

      ML_get_matrix_row(B, 1, &i, &B_allocated, &B_bindx, &B_val,
                        &B_length, 0);
      for (j = 0; j < B_length; j++) {
	global_col = B_gids[B_bindx[j]];
	ML_hash_it(global_col, hashed_inds, index_length,&hash_used, &hash_val);
        hashed_inds[hash_val] = global_col;
        hashed_vals[hash_val] += scalarB*B_val[j];
        B_bindx[j] = hash_val;
      }
#ifdef ML_WITH_EPETRA
      if (matrix_type == ML_EpetraCRS_MATRIX) {
	for (j = 0; j < A_length; j++) {
	  columns[j] = hashed_inds[A_bindx[j]];
	  values[j]  = hashed_vals[A_bindx[j]];
	  nz_ptr++;
	  hashed_inds[A_bindx[j]] = -1;
	  hashed_vals[A_bindx[j]] = 0.;
	}
	count = A_length;
	for (j = 0; j < B_length; j++) {
	  if (hashed_inds[B_bindx[j]] != -1) {
	    columns[count] = hashed_inds[B_bindx[j]];
	    values[count++]  = hashed_vals[B_bindx[j]];
	    nz_ptr++;
	    hashed_inds[B_bindx[j]] = -1;
	    hashed_vals[B_bindx[j]] = 0.;
	  }
	}
	ML_Epetra_CRSinsert(C,i,columns,values,count);
      }
      else {
#endif
	for (j = 0; j < A_length; j++) {
	  columns[nz_ptr] = hashed_inds[A_bindx[j]];
	  values[nz_ptr]  = hashed_vals[A_bindx[j]];
	  nz_ptr++;
	  hashed_inds[A_bindx[j]] = -1;
	  hashed_vals[A_bindx[j]] = 0.;
	}
	for (j = 0; j < B_length; j++) {
	  if (hashed_inds[B_bindx[j]] != -1) {
	    columns[nz_ptr] = hashed_inds[B_bindx[j]];
	    values[nz_ptr]  = hashed_vals[B_bindx[j]];
	    nz_ptr++;
	    hashed_inds[B_bindx[j]] = -1;
	    hashed_vals[B_bindx[j]] = 0.;
	  }
	}
#ifdef ML_WITH_EPETRA
      }
#endif
      rowptr[i+1] = nz_ptr;
      j = rowptr[i+1] - rowptr[i];
      if (j > max_nz_per_row)
        max_nz_per_row = j;
      if (j < min_nz_per_row && j>0)
        min_nz_per_row = j;
  }
  if (matrix_type == ML_CSR_MATRIX) {
    temp = (struct ML_CSR_MSRdata *) ML_allocate(sizeof(struct ML_CSR_MSRdata));
    if (temp == NULL) pr_error("ML_Operator_Add: no space for temp\n");
    temp->columns = columns;
    temp->values  = values;
    temp->rowptr   = rowptr;

    ML_Operator_Set_ApplyFuncData(C, B->invec_leng, A->outvec_leng, 
				  temp,A->outvec_leng, NULL,0);
    ML_Operator_Set_Getrow(C, A->outvec_leng, CSR_getrow);
    ML_Operator_Set_ApplyFunc (C, CSR_matvec);
    ML_globalcsr2localcsr(C, max_per_proc);
    C->data_destroy = ML_CSR_MSRdata_Destroy;

    C->max_nz_per_row = max_nz_per_row;
    C->min_nz_per_row = min_nz_per_row;
    C->N_nonzeros     = nz_ptr;
  }
#ifdef ML_WITH_EPETRA
  else {
    ML_free(rowptr); 
    ML_free(columns);
    ML_free(values);
  }
#endif

  ML_free(A_gids);
  ML_free(B_gids);
  ML_free(hashed_vals);
  ML_free(hashed_inds);
  ML_free(A_val);
  ML_free(A_bindx);
  ML_free(B_val);
  ML_free(B_bindx);

  return 1;

}
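/* A minimal calling sketch (an assumption, not part of the original file),
   using the standard ML_Operator_Create/ML_Operator_Destroy constructors:
   the result operator is created empty and then filled by ML_Operator_Add2,
   here as C = A + 0.5*B stored in CSR form.

     ML_Operator *C = ML_Operator_Create(A->comm);
     ML_Operator_Add2(A, B, C, ML_CSR_MATRIX, 1.0, 0.5);
     ... use C ...
     ML_Operator_Destroy(&C);
*/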
// ================================================ ====== ==== ==== == =
// Copied from ml_agg_genP.c
static void ML_Init_Aux(ML_Operator* A, Teuchos::ParameterList &List) {
  int i, j, n, count, num_PDEs, BlockRow, BlockCol;
  double threshold;
  int* columns;
  double* values;
  int allocated, entries = 0;
  int N_dimensions;
  int DiagID;
  double DiagValue;
  int** filter;
  double dist;
  double *LaplacianDiag;
  int     Nghost;


  // Boundary exchange the coords
  double *x_coord=0, *y_coord=0, *z_coord=0;
  RefMaxwell_SetupCoordinates(A,List,x_coord,y_coord,z_coord);
  int dim=(x_coord!=0) + (y_coord!=0) + (z_coord!=0);

  /* Sanity Checks */
  if(dim == 0 || ((!x_coord && (y_coord || z_coord)) || (x_coord && !y_coord && z_coord))){
    std::cerr<<"Error: Coordinates not defined.  This is necessary for aux aggregation (found "<<dim<<" coordinates).\n";
    exit(-1);
  }

  num_PDEs = A->num_PDEs;
  N_dimensions = dim;
  threshold = A->aux_data->threshold;

  ML_Operator_AmalgamateAndDropWeak(A, num_PDEs, 0.0);
  n = A->invec_leng;
  Nghost = ML_CommInfoOP_Compute_TotalRcvLength(A->getrow->pre_comm);

  LaplacianDiag = (double *) ML_allocate((A->getrow->Nrows+Nghost+1)*
                                         sizeof(double));

  filter = (int**) ML_allocate(sizeof(int*) * n);

  allocated = 128;
  columns = (int *)    ML_allocate(allocated * sizeof(int));
  values  = (double *) ML_allocate(allocated * sizeof(double));

  for (i = 0 ; i < n ; ++i) {
    BlockRow = i;
    DiagID = -1;
    DiagValue = 0.0;

    ML_get_matrix_row(A,1,&i,&allocated,&columns,&values, &entries,0);

    for (j = 0; j < entries; j++) {
      BlockCol = columns[j];
      if (BlockRow != BlockCol) {
        dist = 0.0;

        switch (N_dimensions) {
        case 3:
          dist += (z_coord[BlockRow] - z_coord[BlockCol]) * (z_coord[BlockRow] - z_coord[BlockCol]);
        case 2:
          dist += (y_coord[BlockRow] - y_coord[BlockCol]) * (y_coord[BlockRow] - y_coord[BlockCol]);
        case 1:
          dist += (x_coord[BlockRow] - x_coord[BlockCol]) * (x_coord[BlockRow] - x_coord[BlockCol]);
        }

        if (dist == 0.0) {
          printf("node %d = %e ", i, x_coord[BlockRow]);
          if (N_dimensions > 1) printf(" %e ", y_coord[BlockRow]);
          if (N_dimensions > 2) printf(" %e ", z_coord[BlockRow]);
          printf("\n");
          printf("node %d = %e ", j, x_coord[BlockCol]);
          if (N_dimensions > 1) printf(" %e ", y_coord[BlockCol]);
          if (N_dimensions > 2) printf(" %e ", z_coord[BlockCol]);
          printf("\n");
          printf("Operator has inlen = %d and outlen = %d\n",
                 A->invec_leng, A->outvec_leng);
        }

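        /* Note: coincident node coordinates (dist == 0) are only reported
           above; the division below would then produce an infinite weight. */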
        dist = 1.0 / dist;
        DiagValue += dist;
      }
      else if (columns[j] == i) {
        DiagID = j;
      }
    }

    if (DiagID == -1) {
      fprintf(stderr, "ERROR: matrix has no diagonal!\n"
              "ERROR: (file %s, line %d)\n",
              __FILE__, __LINE__);
      exit(EXIT_FAILURE);
    }
    LaplacianDiag[BlockRow] = DiagValue;
  }
  if ( A->getrow->pre_comm != NULL )
     ML_exchange_bdry(LaplacianDiag,A->getrow->pre_comm,A->getrow->Nrows,
                      A->comm, ML_OVERWRITE,NULL);


  for (i = 0 ; i < n ; ++i) {
    BlockRow = i;

    ML_get_matrix_row(A,1,&i,&allocated,&columns,&values, &entries,0);

    for (j = 0; j < entries; j++) {
      BlockCol = columns[j];
      if (BlockRow != BlockCol) {
        dist = 0.0;
        switch (N_dimensions) {
        case 3:
          dist += (z_coord[BlockRow] - z_coord[BlockCol]) * (z_coord[BlockRow] - z_coord[BlockCol]);
        case 2:
          dist += (y_coord[BlockRow] - y_coord[BlockCol]) * (y_coord[BlockRow] - y_coord[BlockCol]);
        case 1:
          dist += (x_coord[BlockRow] - x_coord[BlockCol]) * (x_coord[BlockRow] - x_coord[BlockCol]);
        }

        dist = 1.0 / dist;
        values[j] = dist;
      }
    }

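    /* Build the aux filter for this row: record the off-diagonal neighbors
       whose distance-Laplacian entry is weak relative to the diagonal,
       i.e. L_ij^2 < threshold^2 * L_ii * L_jj.  ML_Aux_Getrow, installed at
       the end of this routine, consults this per-row list when returning
       rows during aggregation. */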
    count = 0;
    for (j = 0 ; j < entries ; ++j) {
      if (  (i != columns[j]) &&
            (values[j]*values[j] <
       LaplacianDiag[BlockRow]*LaplacianDiag[columns[j]]*threshold*threshold)){
        columns[count++] = columns[j];
      }
    }

    /* insert the rows */
    filter[BlockRow] = (int*) ML_allocate(sizeof(int) * (count + 1));
    filter[BlockRow][0] = count;

    for (j = 0 ; j < count ; ++j)
      filter[BlockRow][j + 1] = columns[j];

  }

  ML_free(columns);
  ML_free(values);

  ML_free(LaplacianDiag);

  ML_Operator_UnAmalgamateAndDropWeak(A, num_PDEs, 0.0);

  A->aux_data->aux_func_ptr  = A->getrow->func_ptr;
  A->getrow->func_ptr = ML_Aux_Getrow;
  A->aux_data->filter = filter;
  A->aux_data->filter_size = n;

  // Cleanup
  ML_free(x_coord);
  ML_free(y_coord);
  ML_free(z_coord);

}
Example #19
void ML_rap_check(ML *ml, ML_Operator *RAP, ML_Operator *R,
        ML_Operator *A, ML_Operator *P, int iNvec, int oNvec)
{
   int i,j;
   double *vec1, *vec2, *vec3, *vec4, *vec5;
   double norm1, norm2;

#ifdef DEBUG
   printf("ML_rap_check begins ...\n");
#endif

   if (RAP->getrow->ML_id != ML_ID_MATRIX) {
      if (ml->comm->ML_mypid == 0)
         printf("ML_rap_check: RAP is the wrong object (=%d). \n",
                         RAP->getrow->ML_id);
      exit(1);
   }
   if (R->getrow->ML_id != ML_ID_MATRIX) {
      if (ml->comm->ML_mypid == 0)
        printf("ML_rap_check: R is the wrong object (=%d). \n",
                         R->getrow->ML_id);
      exit(1);
   }
   if (P->getrow->ML_id != ML_ID_MATRIX) {
      if (ml->comm->ML_mypid == 0)
        printf("ML_rap_check: P is the wrong object (=%d). \n",
                         P->getrow->ML_id);
      exit(1);
   }
   if (A->getrow->ML_id != ML_ID_MATRIX) {
      if (ml->comm->ML_mypid == 0)
        printf("ML_rap_check: A is the wrong object (=%d). \n",
                         A->getrow->ML_id);
      exit(1);
   }

   /* j is the number of external variables */

   j = 0;
   for (i = 0; i < RAP->getrow->pre_comm->N_neighbors; i++)
      j += RAP->getrow->pre_comm->neighbors[i].N_rcv;

   vec1 = (double *) ML_allocate((iNvec+ 1 + j)*sizeof(double));
   vec2 = (double *) ML_allocate((P->getrow->Nrows+1)*sizeof(double));
   vec3 = (double *) ML_allocate((A->getrow->Nrows+1)*sizeof(double));
   vec4 = (double *) ML_allocate((oNvec + 1)*sizeof(double));
   vec5 = (double *) ML_allocate((oNvec + 1)*sizeof(double));

   for (i = 0; i < iNvec; i++)
      vec1[i] = (double) (ml->comm->ML_mypid*2301 + i*7 + 1);

   j = P->getrow->Nrows;
   ML_getrow_matvec(P, vec1, iNvec, vec2,&j);
   i = A->getrow->Nrows;
   ML_getrow_matvec(A, vec2, j, vec3,&i);
   ML_getrow_matvec(R, vec3, i, vec4,&oNvec);

   /* j is the number of variables sent in communication */

   j = 0;
   for (i = 0; i < RAP->getrow->pre_comm->N_neighbors; i++)
      j += RAP->getrow->pre_comm->neighbors[i].N_send;

   ML_restricted_MSR_mult( RAP, oNvec, vec1, vec5, j);

   norm1 = sqrt(ML_gdot(oNvec, vec5, vec5, ml->comm));
   for (i = 0; i < oNvec; i++) vec5[i] -= vec4[i];
   norm2 = sqrt(ML_gdot(oNvec, vec5, vec5, ml->comm));

   if (norm2 > norm1*1e-10) {
      norm2 = sqrt(ML_gdot(oNvec, vec4, vec4, ml->comm));
      if (ml->comm->ML_mypid == 0) {
          printf("***************************************\n");
          printf("RAP seems inaccurate:\n");
          printf("    ||    RAP v    ||_2 = %e\n\n", norm1);
          printf("    || R (A (P v)) ||_2 = %e\n",norm2);
          printf("***************************************\n");
      }
   }

   ML_free(vec5);
   ML_free(vec4);
   ML_free(vec3);
   ML_free(vec2);
   ML_free(vec1);

#ifdef DEBUG
   printf("ML_rap_check ends ...\n");
#endif
}
Example #20
int main(int argc, char *argv[])
{
	int num_PDE_eqns=5, N_levels=3;
    /* int nsmooth=1; */

	int    leng, level, N_grid_pts, coarsest_level;

  /* See Aztec User's Guide for more information on the */
  /* variables that follow.                             */

  int    proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE];
  double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE];

  /* data structure for matrix corresponding to the fine grid */

  int    *data_org = NULL, *update = NULL, *external = NULL;
  int    *update_index = NULL, *extern_index = NULL;
  int    *cpntr = NULL;
  int    *bindx = NULL, N_update, iii;
  double *val = NULL;
	double *xxx, *rhs;

	AZ_MATRIX *Amat;
	AZ_PRECOND *Pmat = NULL;
	ML *ml;
	FILE *fp;
  int ch,i;
   struct AZ_SCALING *scaling;
double solve_time, setup_time, start_time;
ML_Aggregate *ag;
int *ivec;
#ifdef VBR_VERSION
ML_Operator *B, *C, *D;
int *vbr_cnptr, *vbr_rnptr, *vbr_indx, *vbr_bindx, *vbr_bnptr, total_blk_rows;
int total_blk_cols, blk_space, nz_space;
double *vbr_val;
struct ML_CSR_MSRdata *csr_data;
#endif


#ifdef ML_MPI
  MPI_Init(&argc,&argv);

  /* get number of processors and the name of this processor */

  AZ_set_proc_config(proc_config, MPI_COMM_WORLD);
#else
  AZ_set_proc_config(proc_config, AZ_NOT_MPI);
#endif

#ifdef binary
	fp=fopen(".data","rb");
#else
	fp=fopen(".data","r");
#endif
	if (fp==NULL)
		{
			printf("couldn't open file .data\n");
			exit(1);
		}
#ifdef binary
        fread(&leng, sizeof(int), 1, fp);
#else
	fscanf(fp,"%d",&leng);
#endif

	fclose(fp);

	N_grid_pts=leng/num_PDE_eqns;



  /* initialize the list of global indices. NOTE: the list of global */
  /* indices must be in ascending order so that subsequent calls to  */
  /* AZ_find_index() will function properly. */

  AZ_read_update(&N_update, &update, proc_config, N_grid_pts, num_PDE_eqns,
                 AZ_linear);

  AZ_read_msr_matrix(update, &val, &bindx, N_update, proc_config);

  /* This code is to fix things up so that we are sure we have all */
  /* blocks (including the ghost nodes) the same size.             */

  AZ_block_MSR(&bindx, &val, N_update, num_PDE_eqns, update);


  AZ_transform(proc_config, &external, bindx, val,  update, &update_index,
	       &extern_index, &data_org, N_update, 0, 0, 0, &cpntr,
               AZ_MSR_MATRIX);

  Amat = AZ_matrix_create( leng );

#ifndef VBR_VERSION

  AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL);

  Amat->matrix_type  = data_org[AZ_matrix_type];

  data_org[AZ_N_rows]  = data_org[AZ_N_internal] + data_org[AZ_N_border];

#else

total_blk_rows = N_update/num_PDE_eqns;
total_blk_cols = total_blk_rows;
blk_space      = total_blk_rows*20;
nz_space       = blk_space*num_PDE_eqns*num_PDE_eqns;

vbr_cnptr = (int    *) ML_allocate(sizeof(int   )*(total_blk_cols+1));
vbr_rnptr = (int    *) ML_allocate(sizeof(int   )*(total_blk_cols+1));
vbr_bnptr = (int    *) ML_allocate(sizeof(int   )*(total_blk_cols+2));
vbr_indx  = (int    *) ML_allocate(sizeof(int   )*(blk_space+1));
vbr_bindx = (int    *) ML_allocate(sizeof(int   )*(blk_space+1));
vbr_val   = (double *) ML_allocate(sizeof(double)*(nz_space+1));

for (i = 0; i <= total_blk_cols; i++) vbr_cnptr[i] = num_PDE_eqns;

  AZ_msr2vbr(vbr_val, vbr_indx, vbr_rnptr,  vbr_cnptr, vbr_bnptr,
                vbr_bindx, bindx, val,
                total_blk_rows, total_blk_cols, blk_space,
                nz_space, -1);

  data_org[AZ_N_rows]  = data_org[AZ_N_internal] + data_org[AZ_N_border];
  data_org[AZ_N_int_blk]  = data_org[AZ_N_internal]/num_PDE_eqns;
  data_org[AZ_N_bord_blk] = data_org[AZ_N_bord_blk]/num_PDE_eqns;
  data_org[AZ_N_ext_blk]  = data_org[AZ_N_ext_blk]/num_PDE_eqns;
  data_org[AZ_matrix_type] = AZ_VBR_MATRIX;


  AZ_set_VBR(Amat, vbr_rnptr, vbr_cnptr, vbr_bnptr, vbr_indx, vbr_bindx,
             vbr_val, data_org, 0, NULL, AZ_LOCAL);

  Amat->matrix_type  = data_org[AZ_matrix_type];
#endif

  start_time = AZ_second();

  ML_Create(&ml, N_levels);
  ML_Set_PrintLevel(3);


  /* set up discretization matrix and matrix vector function */

  AZ_ML_Set_Amat(ml, N_levels-1, N_update, N_update, Amat, proc_config);

  ML_Aggregate_Create( &ag );
  ML_Aggregate_Set_Threshold(ag,0.0);
  ML_Set_SpectralNormScheme_PowerMethod(ml);
/*
   To run SA:
     a) set damping factor to 1 and use power method
        ML_Aggregate_Set_DampingFactor(ag, 4./3.);
   To run NSA:
     a) set damping factor to 0
        ML_Aggregate_Set_DampingFactor(ag, 0.);
   To run NSR
     a) set damping factor to 1 and use power method
        ML_Aggregate_Set_DampingFactor(ag, 1.);
        ag->Restriction_smoothagg_transpose = ML_FALSE;
        ag->keep_agg_information=1;
        ag->keep_P_tentative=1;
     b) hack code so it calls the energy minimizing restriction
          line 2973 of ml_agg_genP.c
     c) turn on the NSR flag in ml_agg_energy_min.cpp
   To run Emin
     a) set minimizing_energy = 2 and keep_agg_information = 1;
      ag->minimizing_energy=2;
      ag->keep_agg_information=1;
      ag->cheap_minimizing_energy = 0;
      ag->block_scaled_SA = 1;
*/
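  /* The settings below follow the "To run Emin" notes above
     (energy-minimizing prolongator with block-scaled SA). */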
  ag->minimizing_energy=2;
  ag->keep_agg_information=1;
  ag->block_scaled_SA = 1;

  ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, num_PDE_eqns, NULL, N_update);
  ML_Aggregate_Set_MaxCoarseSize( ag, 20);

/*
ML_Aggregate_Set_RandomOrdering( ag );
ML_Aggregate_Set_DampingFactor(ag, .1);
ag->drop_tol_for_smoothing = 1.0e-3;
ML_Aggregate_Set_Threshold(ag, 1.0e-3);
ML_Aggregate_Set_MaxCoarseSize( ag, 300);
*/


	coarsest_level = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml, N_levels-1, ML_DECREASING, ag);
	coarsest_level = N_levels - coarsest_level;
	if ( proc_config[AZ_node] == 0 )
		printf("Coarse level = %d \n", coarsest_level);

	/* set up smoothers */

        AZ_defaults(options, params);

	for (level = N_levels-1; level > coarsest_level; level--) {
          /* This is the Aztec domain decomp/ilu smoother that we */
          /* usually use for this problem.                        */

/*
          options[AZ_precond] = AZ_dom_decomp;
          options[AZ_subdomain_solve] = AZ_ilut;
          params[AZ_ilut_fill] = 1.0;
          options[AZ_reorder] = 1;
          ML_Gen_SmootherAztec(ml, level, options, params,
                        proc_config, status, AZ_ONLY_PRECONDITIONER,
                        ML_PRESMOOTHER,NULL);
*/

          /*  Sparse approximate inverse smoother that actually does both */
          /*  pre and post smoothing.                                     */
          /*

          ML_Gen_Smoother_ParaSails(ml , level, ML_PRESMOOTHER, nsmooth,
                                parasails_sym, parasails_thresh,
                                parasails_nlevels, parasails_filter,
                                parasails_loadbal, parasails_factorized);

          parasails_thresh /= 4.;
          */


          /* This is the symmetric Gauss-Seidel smoothing. In parallel,    */
          /* it is not a true Gauss-Seidel in that each processor          */
          /* does a Gauss-Seidel on its local submatrix independent of the */
          /* other processors.                                             */
          /*
	  ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_PRESMOOTHER, nsmooth,1.);
	  ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_POSTSMOOTHER,nsmooth,1.);
          */

          /* Block Gauss-Seidel with block size equal to #DOF per node.    */
          /* Not a true Gauss-Seidel in that each processor does a         */
          /* Gauss-Seidel on its local submatrix independent of the other  */
          /* processors.                                                   */
          /*

	  ML_Gen_Smoother_BlockGaussSeidel(ml,level,ML_PRESMOOTHER,
                                           nsmooth,0.67, num_PDE_eqns);
	  ML_Gen_Smoother_BlockGaussSeidel(ml,level,ML_POSTSMOOTHER,
                                           nsmooth, 0.67, num_PDE_eqns);
          */


  	  ML_Gen_Smoother_SymBlockGaussSeidel(ml,level,ML_POSTSMOOTHER,
                                                1, 1.0, num_PDE_eqns);
	}

        ML_Gen_CoarseSolverSuperLU( ml, coarsest_level);
	ML_Gen_Solver(ml, ML_MGW, N_levels-1, coarsest_level);
	AZ_defaults(options, params);

        options[AZ_solver]   = AZ_gmres;
        options[AZ_scaling]  = AZ_none;
        options[AZ_precond]  = AZ_user_precond;
/*
        options[AZ_conv]     = AZ_r0;
*/
        options[AZ_output]   = 1;
        options[AZ_max_iter] = 1500;
        options[AZ_poly_ord] = 5;
        options[AZ_kspace]   = 130;
        params[AZ_tol]       = 1.0e-8;
/*
options[AZ_precond] = AZ_dom_decomp;
options[AZ_subdomain_solve] = AZ_ilut;
params[AZ_ilut_fill] = 2.0;
*/

	AZ_set_ML_preconditioner(&Pmat, Amat, ml, options);
setup_time = AZ_second() - start_time;

	xxx = (double *) malloc( leng*sizeof(double));
	rhs=(double *)malloc(leng*sizeof(double));

	for (iii = 0; iii < leng; iii++) xxx[iii] = 0.0;

        /* Set rhs */

        fp = fopen("AZ_capture_rhs.mat","r");
        if (fp == NULL) {
           if (proc_config[AZ_node] == 0) printf("taking random vector for rhs\n");
           AZ_random_vector(rhs, data_org, proc_config);
           AZ_reorder_vec(rhs, data_org, update_index, NULL);
        }
        else {
           fclose(fp);
	   ivec =(int *)malloc((leng+1)*sizeof(int));
           AZ_input_msr_matrix("AZ_capture_rhs.mat", update, &rhs, &ivec,
                                N_update, proc_config);
           free(ivec);
           AZ_reorder_vec(rhs, data_org, update_index, NULL);
        }

        /* Set x */

        fp = fopen("AZ_capture_init_guess.mat","r");
        if (fp != NULL) {
           fclose(fp);
	   ivec =(int *)malloc((leng+1)*sizeof(int));
           AZ_input_msr_matrix("AZ_capture_init_guess.mat",update, &xxx, &ivec,
                                N_update, proc_config);
           free(ivec);
           AZ_reorder_vec(xxx, data_org, update_index, NULL);
        }

        /* if Dirichlet BC ... put the answer in */
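        /* In MSR format the first N entries of val[] hold the matrix   */
        /* diagonal; a diagonal of (about) 1.0 is taken to flag a       */
        /* Dirichlet row, so the solution there is simply the rhs.      */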

        for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) {
           if ( (val[i] > .99999999) && (val[i] < 1.0000001))
              xxx[i] = rhs[i];
        }

        fp = fopen("AZ_no_multilevel.dat","r");
        scaling = AZ_scaling_create();
start_time = AZ_second();
        if (fp != NULL) {
           fclose(fp);
           options[AZ_precond] = AZ_none;
           options[AZ_scaling] = AZ_sym_diag;
           options[AZ_ignore_scaling] = AZ_TRUE;

           options[AZ_keep_info] = 1;
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling);

/*
           options[AZ_pre_calc] = AZ_reuse;
           options[AZ_conv] = AZ_expected_values;
           if (proc_config[AZ_node] == 0)
              printf("\n-------- Second solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling);
           if (proc_config[AZ_node] == 0)
              printf("\n-------- Third solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling);
*/
        }
        else {
           options[AZ_keep_info] = 1;
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);
           options[AZ_pre_calc] = AZ_reuse;
           options[AZ_conv] = AZ_expected_values;
/*
           if (proc_config[AZ_node] == 0)
              printf("\n-------- Second solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);
           if (proc_config[AZ_node] == 0)
              printf("\n-------- Third solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);
*/
        }
   solve_time = AZ_second() - start_time;

   if (proc_config[AZ_node] == 0)
      printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time);

   ML_Aggregate_Destroy(&ag);
   ML_Destroy(&ml);
   AZ_free((void *) Amat->data_org);
   AZ_free((void *) Amat->val);
   AZ_free((void *) Amat->bindx);
   AZ_free((void *) update);
   AZ_free((void *) external);
   AZ_free((void *) extern_index);
   AZ_free((void *) update_index);
   AZ_scaling_destroy(&scaling);
   if (Amat  != NULL) AZ_matrix_destroy(&Amat);
   if (Pmat  != NULL) AZ_precond_destroy(&Pmat);
   free(xxx);
   free(rhs);


#ifdef ML_MPI
  MPI_Finalize();
#endif

  return 0;

}
Beispiel #21
0
void ML_getrow_matvec(ML_Operator *matrix, double *vec, int Nvec, 
                      double *ovec, int *Novec)
{
   ML_Operator *temp, *temp2, *temp3, *temp4, *tptr;
   int *cols, i;
   int allocated, row_length;

   if (matrix->getrow->func_ptr == NULL) {
      printf("ML_getrow_matvec: empty object? \n");
      exit(1);
   }
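
   /* Wrap the input vector as an Nvec x 1 operator (one entry per row, */
   /* served by VECTOR_getrows) so that the matrix-vector product can   */
   /* be formed with ML_matmat_mult() below.                            */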
   temp = ML_Operator_Create(matrix->comm);
   ML_Operator_Set_1Levels(temp, matrix->from, matrix->from);
   ML_Operator_Set_ApplyFuncData(temp,1,Nvec,vec,Nvec,NULL,0);

   ML_Operator_Set_Getrow(temp,Nvec, VECTOR_getrows);
   temp->max_nz_per_row = 1;
   temp->N_nonzeros     = Nvec;

   if (matrix->getrow->pre_comm != NULL) {
      ML_exchange_rows(temp, &temp2, matrix->getrow->pre_comm);
   }
   else temp2 = temp;

   ML_matmat_mult(matrix, temp2, &temp3);

   if (matrix->getrow->post_comm != NULL)
      ML_exchange_rows(temp3, &temp4, matrix->getrow->post_comm);
   else temp4 = temp3;

   allocated = temp4->getrow->Nrows + 1;
   cols = (int *) ML_allocate(allocated*sizeof(int));
   if (cols == NULL) {
      printf("no space in ML_getrow_matvec()\n");
      exit(1);
   }
   for (i = 0; i < temp4->getrow->Nrows; i++) {
      ML_get_matrix_row(temp4, 1, &i, &allocated , &cols, &ovec,
                   &row_length, i);
      if (allocated != temp4->getrow->Nrows + 1)
         printf("memory problems ... we can't reallocate here\n");
   }

   ML_free(cols);

   if ( *Novec != temp4->getrow->Nrows) {
     printf("Warning: The length of ML's output vector does not agree with\n");
     printf("         the user's length for the output vector (%d vs. %d).\n",
            *Novec, temp4->getrow->Nrows);
     printf("         indicate a problem.\n");
   }
   *Novec = temp4->getrow->Nrows;

   if (matrix->getrow->pre_comm != NULL) {
      tptr = temp2;
      while ( (tptr!= NULL) && (tptr->sub_matrix != temp))
         tptr = tptr->sub_matrix;
      if (tptr != NULL) tptr->sub_matrix = NULL;
      ML_RECUR_CSR_MSRdata_Destroy(temp2);
      ML_Operator_Destroy(&temp2);
   }
   if (matrix->getrow->post_comm != NULL) {
      tptr = temp4;
      while ( (tptr!= NULL) && (tptr->sub_matrix != temp3))
         tptr = tptr->sub_matrix;
      if (tptr != NULL) tptr->sub_matrix = NULL;
      ML_RECUR_CSR_MSRdata_Destroy(temp4);
      ML_Operator_Destroy(&temp4);
   }

   ML_Operator_Destroy(&temp);
   ML_RECUR_CSR_MSRdata_Destroy(temp3);
   ML_Operator_Destroy(&temp3);
}
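
/*
 * A minimal usage sketch (not part of the original example), assuming "A" is
 * an already-assembled ML_Operator and "x" holds A->invec_leng entries.  The
 * helper name check_getrow_apply and the variables x, y, and Ny are
 * hypothetical; the routine simply forms y = A*x through ML_getrow_matvec().
 */
static void check_getrow_apply(ML_Operator *A, double *x)
{
   int     Ny = A->outvec_leng;              /* expected output length */
   double *y  = (double *) ML_allocate(Ny * sizeof(double));

   ML_getrow_matvec(A, x, A->invec_leng, y, &Ny);  /* y = A*x; Ny is   */
                                                   /* checked and      */
                                                   /* reset inside     */
   ML_free(y);
}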
Beispiel #22
0
void ML_interp_check(ML *ml, int coarse_level, int fine_level)
{
   int    ii, jj, ncoarse, nfine;
   double *c_data, *f_data, coords[3], dtemp, d2, dlargest;
   ML_GridFunc *coarse_funs, *fine_funs;
   void   *coarse_data, *fine_data;
   int    nfine_eqn, ncoarse_eqn, stride = 1;

   /* check an interpolated linear function */

   coarse_data = ml->SingleLevel[coarse_level].Grid->Grid;
   fine_data   = ml->SingleLevel[  fine_level].Grid->Grid;
   coarse_funs = ml->SingleLevel[coarse_level].Grid->gridfcn;
   fine_funs   = ml->SingleLevel[  fine_level].Grid->gridfcn;

   if ( (coarse_data==NULL)||(fine_data==NULL)) {
      printf("ML_interp_check: grid data not found?\n");
      exit(1);
   }
   if ( (coarse_funs==NULL)||(fine_funs==NULL)) {
      printf("ML_interp_check: grid functions not found?\n");
      exit(1);
   }
   if ( (coarse_funs->USR_grid_get_nvertices == 0) ||
        (  fine_funs->USR_grid_get_nvertices == 0)) {
      printf("ML_interp_check: USR_grid_get_nvertices not found?\n");
      exit(1);
   }

   ncoarse     = coarse_funs->USR_grid_get_nvertices(coarse_data);
   nfine       =   fine_funs->USR_grid_get_nvertices(  fine_data);
   nfine_eqn   = ml->SingleLevel[coarse_level].Pmat->outvec_leng;
   ncoarse_eqn = ml->SingleLevel[coarse_level].Pmat->invec_leng;

   c_data  = (double *) ML_allocate(ncoarse_eqn*sizeof(double));
   f_data  = (double *) ML_allocate(nfine_eqn*sizeof(double));
   for (ii = 0; ii < ncoarse_eqn; ii++) c_data[ii] = 0.;
   for (ii = 0; ii < nfine_eqn; ii++) f_data[ii] = 0.;

   /* ASSUMING that each grid point on this processor owns a set of     */
   /* equations and that all equations at a grid point are numbered     */
   /* consecutively.                                                    */

   stride = nfine_eqn/nfine;
   for (ii = 0 ; ii < ncoarse ; ii++)  {
      coarse_funs->USR_grid_get_vertex_coordinate(coarse_data,ii,coords);
      for (jj = 0; jj < stride; jj++) {
         c_data[ii*stride + jj] = coords[0] + 3.*coords[1] + .5;
      }
   }

   ML_Operator_Apply(ml->SingleLevel[coarse_level].Pmat,ncoarse_eqn,c_data,
		     nfine_eqn,f_data);
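
   /* Compare the interpolated values against the exact linear function */
   /* at each fine vertex and record the largest relative error.        */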

   dlargest = 0.0;
   for (ii = 0 ; ii < nfine; ii++)  {
      fine_funs->USR_grid_get_vertex_coordinate(fine_data , ii, coords);
      dtemp = coords[0] + 3.*coords[1] + .5;
      d2 = ML_dabs(dtemp - f_data[ii*stride])/(ML_dabs(dtemp)+1.e-9);
      /* Ray debugging
      if ( d2 > 1.e-8)
         printf("%d: f_data[%d] = %e  %e | %e %e\n",ml->comm->ML_mypid,
                ii,f_data[ii*stride],dtemp,coords[0],coords[1]);
      */
      if ( d2 > dlargest) {
            dlargest = d2;
      }
   }
   ML_free(f_data);
   ML_free(c_data);
}
int main(int argc, char *argv[])
{
  int num_PDE_eqns=1, N_levels=3, nsmooth=2;

  int leng, level, N_grid_pts, coarsest_level;
  int leng1,leng2;
  /* See Aztec User's Guide for more information on the */
  /* variables that follow.                             */

  int    proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE];
  double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE];

  /* data structure for matrix corresponding to the fine grid */

  double *val = NULL, *xxx, *rhs, solve_time, setup_time, start_time;
  AZ_MATRIX *Amat;
  AZ_PRECOND *Pmat = NULL;
  ML *ml;
  FILE *fp;
  int i, j, Nrigid, *garbage, nblocks=0, *blocks = NULL, *block_pde=NULL;
  struct AZ_SCALING *scaling;
  ML_Aggregate *ag;
  double *mode, *rigid=NULL, alpha; 
  char filename[80];
  int    one = 1;
  int    proc,nprocs;
  char pathfilename[100];

#ifdef ML_MPI
  MPI_Init(&argc,&argv);
  /* get number of processors and the name of this processor */
  AZ_set_proc_config(proc_config, MPI_COMM_WORLD);
  proc   = proc_config[AZ_node];
  nprocs = proc_config[AZ_N_procs];
#else
  AZ_set_proc_config(proc_config, AZ_NOT_MPI);
  proc   = 0;
  nprocs = 1;
#endif
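
   /* Processor 0 reads the run-time options from <argv[1]>/inputfile and */
   /* broadcasts the reader context to the other processors.              */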

   if (proc_config[AZ_node] == 0) {
      sprintf(pathfilename,"%s/inputfile",argv[1]);
      ML_Reader_ReadInput(pathfilename, &context);
   }
   else context = (struct reader_context *) ML_allocate(sizeof(struct reader_context));
   AZ_broadcast((char *) context,  sizeof(struct reader_context), proc_config,
                AZ_PACK);
   AZ_broadcast((char *) NULL        ,   0          , proc_config, AZ_SEND);

   N_levels = context->N_levels;
   printf("N_levels %d\n",N_levels);
   nsmooth   = context->nsmooth;
   num_PDE_eqns = context->N_dofPerNode;
   printf("num_PDE_eqns %d\n",num_PDE_eqns);

   ML_Set_PrintLevel(context->output_level);

  /* read in the number of matrix equations */
  leng = 0;
  if (proc_config[AZ_node] == 0) {
        sprintf(pathfilename,"%s/data_matrix.txt",argv[1]);
        fp=fopen(pathfilename,"r");
     if (fp==NULL) {
        printf("**ERR** couldn't open file data_matrix.txt\n");
        exit(1);
     }
        fscanf(fp,"%d",&leng);
     fclose(fp);
  }
  leng = AZ_gsum_int(leng, proc_config);

  N_grid_pts=leng/num_PDE_eqns;


  /* initialize the list of global indices. NOTE: the list of global */
  /* indices must be in ascending order so that subsequent calls to  */
  /* AZ_find_index() will function properly. */
#if 0
  if (proc_config[AZ_N_procs] == 1) i = AZ_linear;
  else i = AZ_file;
#endif
  i = AZ_linear;

  /* cannot use AZ_input_update for variable blocks (forgot why, but debugged through it)*/
  /* make a linear distribution of the matrix       */
  /* if the linear distribution does not align with the blocks, */
  /* this is corrected in ML_AZ_Reader_ReadVariableBlocks */
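  /* Each processor gets leng/nprocs rows; the first (leng % nprocs)     */
  /* processors get one extra row.  leng1 ends up as the local row count */
  /* and leng2 as this processor's first global row index.               */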
  leng1 = leng/nprocs;
  leng2 = leng-leng1*nprocs;
  if (proc >= leng2)
  {
     leng2 += (proc*leng1);
  }
  else
  {
     leng1++;
     leng2 = proc*leng1;
  }
  N_update = leng1;
  update = (int*)AZ_allocate((N_update+1)*sizeof(int));
  if (update==NULL)
  {
      (void) fprintf (stderr, "Not enough space to allocate 'update'\n");
      fflush(stderr); exit(EXIT_FAILURE);
  }
  for (i=0; i<N_update; i++) update[i] = i+leng2;
  
#if 0 /* debug */
  printf("proc %d N_update %d\n",proc_config[AZ_node],N_update);
  fflush(stdout);                   
#endif
  sprintf(pathfilename,"%s/data_vblocks.txt",argv[1]);
  ML_AZ_Reader_ReadVariableBlocks(pathfilename,&nblocks,&blocks,&block_pde,
                                  &N_update,&update,proc_config);
#if 0 /* debug */
  printf("proc %d N_update %d\n",proc_config[AZ_node],N_update);
  fflush(stdout);                   
#endif

  sprintf(pathfilename,"%s/data_matrix.txt",argv[1]);
  AZ_input_msr_matrix(pathfilename,update, &val, &bindx, N_update, proc_config);

  /* This code fixes things up so that all blocks (including the */
  /* ghost nodes) are guaranteed to have the same size.  It is   */
  /* not clear whether this is a good idea with variable blocks; */
  /* the example input files (see the top of this file) do not   */
  /* need it anyway.                                             */
  /*
  AZ_block_MSR(&bindx, &val, N_update, num_PDE_eqns, update);
  */
  AZ_transform_norowreordering(proc_config, &external, bindx, val,  update, &update_index,
	       &extern_index, &data_org, N_update, 0, 0, 0, &cpntr,
	       AZ_MSR_MATRIX);
	
  Amat = AZ_matrix_create( leng );

  AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL);

  Amat->matrix_type  = data_org[AZ_matrix_type];
	
  data_org[AZ_N_rows]  = data_org[AZ_N_internal] + data_org[AZ_N_border];
			
  start_time = AZ_second();

  options[AZ_scaling] = AZ_none;

  ML_Create(&ml, N_levels);
			
			
  /* set up discretization matrix and matrix vector function */
  AZ_ML_Set_Amat(ml, 0, N_update, N_update, Amat, proc_config);

  ML_Set_ResidualOutputFrequency(ml, context->output);
  ML_Set_Tolerance(ml, context->tol);
  ML_Aggregate_Create( &ag );
  if (ML_strcmp(context->agg_coarsen_scheme,"Mis") == 0) {
     ML_Aggregate_Set_CoarsenScheme_MIS(ag);
  }
  else if (ML_strcmp(context->agg_coarsen_scheme,"Uncoupled") == 0) {
     ML_Aggregate_Set_CoarsenScheme_Uncoupled(ag);
  }
  else if (ML_strcmp(context->agg_coarsen_scheme,"Coupled") == 0) {
     ML_Aggregate_Set_CoarsenScheme_Coupled(ag);
  }
  else if (ML_strcmp(context->agg_coarsen_scheme,"Metis") == 0) {
     ML_Aggregate_Set_CoarsenScheme_METIS(ag);
     for (i=0; i<N_levels; i++)
        ML_Aggregate_Set_NodesPerAggr(ml,ag,i,9);
  }
  else if (ML_strcmp(context->agg_coarsen_scheme,"VBMetis") == 0) {
     /* when no blocks read, use standard metis assuming constant block sizes */
     if (!blocks) 
        ML_Aggregate_Set_CoarsenScheme_METIS(ag);
     else {
        ML_Aggregate_Set_CoarsenScheme_VBMETIS(ag);
        ML_Aggregate_Set_Vblocks_CoarsenScheme_VBMETIS(ag,0,N_levels,nblocks,
                                                       blocks,block_pde,N_update);
     }
     for (i=0; i<N_levels; i++)
        ML_Aggregate_Set_NodesPerAggr(ml,ag,i,9);
  }
  else {
     printf("**ERR** ML: Unknown aggregation scheme %s\n",context->agg_coarsen_scheme);
     exit(-1);
  }
  ML_Aggregate_Set_DampingFactor(ag, context->agg_damping);
  ML_Aggregate_Set_MaxCoarseSize( ag, context->maxcoarsesize);
  ML_Aggregate_Set_Threshold(ag, context->agg_thresh);

  if (ML_strcmp(context->agg_spectral_norm,"Calc") == 0) {
     ML_Set_SpectralNormScheme_Calc(ml);
  }
  else if (ML_strcmp(context->agg_spectral_norm,"Anorm") == 0) {
     ML_Set_SpectralNormScheme_Anorm(ml);
  }
  else {
     printf("**WRN** ML: Unknown spectral norm scheme %s\n",context->agg_spectral_norm);
  }

  /* read in the rigid body modes */

   Nrigid = 0;
   if (proc_config[AZ_node] == 0) {
      sprintf(filename,"data_nullsp%d.txt",Nrigid);
      sprintf(pathfilename,"%s/%s",argv[1],filename);
      while( (fp = fopen(pathfilename,"r")) != NULL) {
          fclose(fp);
          Nrigid++;
          sprintf(filename,"data_nullsp%d.txt",Nrigid);
          sprintf(pathfilename,"%s/%s",argv[1],filename);
      }
    }
    Nrigid = AZ_gsum_int(Nrigid,proc_config);

    if (Nrigid != 0) {
       rigid = (double *) ML_allocate( sizeof(double)*Nrigid*(N_update+1) );
       if (rigid == NULL) {
          printf("Error: Not enough space for rigid body modes\n");
       }
    }

   /* Set rhs */
   sprintf(pathfilename,"%s/data_rhs.txt",argv[1]);
   fp = fopen(pathfilename,"r");
   if (fp == NULL) {
      rhs=(double *)ML_allocate(leng*sizeof(double));
      if (proc_config[AZ_node] == 0) printf("taking linear vector for rhs\n");
      for (i = 0; i < N_update; i++) rhs[i] = (double) update[i];
   }
   else {
      fclose(fp);
      if (proc_config[AZ_node] == 0) printf("reading rhs from a file\n");
      AZ_input_msr_matrix(pathfilename, update, &rhs, &garbage, N_update, 
                          proc_config);
   }
   AZ_reorder_vec(rhs, data_org, update_index, NULL);
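
   /* Read each rigid body mode, orthogonalize it against the modes read */
   /* so far (classical Gram-Schmidt), and project the modes out of the  */
   /* right-hand side.                                                   */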

   for (i = 0; i < Nrigid; i++) {
      sprintf(filename,"data_nullsp%d.txt",i);
      sprintf(pathfilename,"%s/%s",argv[1],filename);
      AZ_input_msr_matrix(pathfilename, update, &mode, &garbage, N_update, 
                          proc_config);
      AZ_reorder_vec(mode, data_org, update_index, NULL);

#if 0 /* test the given rigid body mode, output-vector should be ~0 */
       Amat->matvec(mode, rigid, Amat, proc_config);
       for (j = 0; j < N_update; j++) printf("this is %d %e\n",j,rigid[j]);
#endif

    for (j = 0; j < i; j++) {
       alpha = -AZ_gdot(N_update, mode, &(rigid[j*N_update]), proc_config)/
                  AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]), 
                               proc_config);
       DAXPY_F77(&N_update, &alpha,  &(rigid[j*N_update]),  &one, mode, &one);
    }
   
    /* rhs orthogonalization */

    alpha = -AZ_gdot(N_update, mode, rhs, proc_config)/
                    AZ_gdot(N_update, mode, mode, proc_config);
    DAXPY_F77(&N_update, &alpha,  mode,  &one, rhs, &one);

    for (j = 0; j < N_update; j++) rigid[i*N_update+j] = mode[j];
    free(mode);
    free(garbage);
  }

  for (j = 0; j < Nrigid; j++) {
     alpha = -AZ_gdot(N_update, rhs, &(rigid[j*N_update]), proc_config)/
              AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]), 
                      proc_config);
     DAXPY_F77(&N_update, &alpha,  &(rigid[j*N_update]),  &one, rhs, &one);
  }

#if 0 /* for testing the default nullsp */
  ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, 6, NULL, N_update);
#else
  if (Nrigid != 0) {
     ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, Nrigid, rigid, N_update);
  }
#endif
  if (rigid) ML_free(rigid);

  ag->keep_agg_information = 1;
  coarsest_level = ML_Gen_MGHierarchy_UsingAggregation(ml, 0, 
                                            ML_INCREASING, ag);
  coarsest_level--;                                            

  if ( proc_config[AZ_node] == 0 )
	printf("Coarse level = %d \n", coarsest_level);
	
#if 0
  /* set up smoothers */
  if (!blocks)
     blocks = (int *) ML_allocate(sizeof(int)*N_update);
#endif

  for (level = 0; level < coarsest_level; level++) {

      num_PDE_eqns = ml->Amat[level].num_PDEs;
		
     /*  Sparse approximate inverse smoother that actually does both */
     /*  pre and post smoothing.                                     */

     if (ML_strcmp(context->smoother,"Parasails") == 0) {
        ML_Gen_Smoother_ParaSails(ml , level, ML_PRESMOOTHER, nsmooth, 
                                parasails_sym, parasails_thresh, 
                                parasails_nlevels, parasails_filter,
                                (int) parasails_loadbal, parasails_factorized);
     }

     /* This is the symmetric Gauss-Seidel smoothing that we usually use. */
     /* In parallel, it is not a true Gauss-Seidel in that each processor */
     /* does a Gauss-Seidel on its local submatrix independent of the     */
     /* other processors.                                                 */

     else if (ML_strcmp(context->smoother,"GaussSeidel") == 0) {
       ML_Gen_Smoother_GaussSeidel(ml , level, ML_BOTH, nsmooth,1.);
     }
     else if (ML_strcmp(context->smoother,"SymGaussSeidel") == 0) {
       ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_BOTH, nsmooth,1.);
     }
     else if (ML_strcmp(context->smoother,"Poly") == 0) {
       ML_Gen_Smoother_Cheby(ml, level, ML_BOTH, 30., nsmooth);
     }
     else if (ML_strcmp(context->smoother,"BlockGaussSeidel") == 0) {
       ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_BOTH, nsmooth,1.,
					 num_PDE_eqns);
     }
     else if (ML_strcmp(context->smoother,"VBSymGaussSeidel") == 0) {
         if (blocks)    ML_free(blocks);
         if (block_pde) ML_free(block_pde);
         blocks    = NULL;
         block_pde = NULL;
         nblocks   = 0;
         ML_Aggregate_Get_Vblocks_CoarsenScheme_VBMETIS(ag,level,N_levels,&nblocks,
                                                        &blocks,&block_pde);
         if (blocks==NULL) ML_Gen_Blocks_Aggregates(ag, level, &nblocks, &blocks);
         ML_Gen_Smoother_VBlockSymGaussSeidel(ml , level, ML_BOTH, nsmooth,1.,
                                              nblocks, blocks);
     }

     /* This is a true Gauss Seidel in parallel. This seems to work for  */
     /* elasticity problems.  However, I don't believe that this is very */
     /* efficient in parallel.                                           */       
     /*
      nblocks = ml->Amat[level].invec_leng;
      for (i =0; i < nblocks; i++) blocks[i] = i;
      ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml , level, ML_PRESMOOTHER,
                                                  nsmooth, 1., nblocks, blocks);
      ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml, level, ML_POSTSMOOTHER,
                                                  nsmooth, 1., nblocks, blocks);
     */

     /* Jacobi Smoothing                                                 */

     else if (ML_strcmp(context->smoother,"Jacobi") == 0) {
        ML_Gen_Smoother_Jacobi(ml , level, ML_PRESMOOTHER, nsmooth,.4);
        ML_Gen_Smoother_Jacobi(ml , level, ML_POSTSMOOTHER, nsmooth,.4);
     }

     /*  This does a block Gauss-Seidel (not true GS in parallel)        */
     /*  where each processor has 'nblocks' blocks.                      */
     /* */

     else if (ML_strcmp(context->smoother,"Metis") == 0) {
         if (blocks)    ML_free(blocks);
         if (block_pde) ML_free(block_pde);
         nblocks = 250;
         ML_Gen_Blocks_Metis(ml, level, &nblocks, &blocks);
         ML_Gen_Smoother_VBlockSymGaussSeidel(ml , level, ML_BOTH, nsmooth,1.,
                                        nblocks, blocks);
     }
     else {
         printf("unknown smoother %s\n",context->smoother);
         exit(1);
     }
   }
	
   /* set coarse level solver */
   nsmooth   = context->coarse_its;
   /*  Sparse approximate inverse smoother that actually does both */
   /*  pre and post smoothing.                                     */

   if (ML_strcmp(context->coarse_solve,"Parasails") == 0) {
        ML_Gen_Smoother_ParaSails(ml , coarsest_level, ML_PRESMOOTHER, nsmooth, 
                                parasails_sym, parasails_thresh, 
                                parasails_nlevels, parasails_filter,
                                (int) parasails_loadbal, parasails_factorized);
   }

   else if (ML_strcmp(context->coarse_solve,"GaussSeidel") == 0) {
       ML_Gen_Smoother_GaussSeidel(ml , coarsest_level, ML_BOTH, nsmooth,1.);
   }
   else if (ML_strcmp(context->coarse_solve,"Poly") == 0) {
     ML_Gen_Smoother_Cheby(ml, coarsest_level, ML_BOTH, 30., nsmooth);
   }
   else if (ML_strcmp(context->coarse_solve,"SymGaussSeidel") == 0) {
       ML_Gen_Smoother_SymGaussSeidel(ml , coarsest_level, ML_BOTH, nsmooth,1.);
   }
   else if (ML_strcmp(context->coarse_solve,"BlockGaussSeidel") == 0) {
       ML_Gen_Smoother_BlockGaussSeidel(ml, coarsest_level, ML_BOTH, nsmooth,1.,
					num_PDE_eqns);
   }
   else if (ML_strcmp(context->coarse_solve,"Aggregate") == 0) {
         if (blocks)    ML_free(blocks);
         if (block_pde) ML_free(block_pde);
         ML_Gen_Blocks_Aggregates(ag, coarsest_level, &nblocks, &blocks);
         ML_Gen_Smoother_VBlockSymGaussSeidel(ml , coarsest_level, ML_BOTH, 
                                        nsmooth,1., nblocks, blocks);
   }
   else if (ML_strcmp(context->coarse_solve,"Jacobi") == 0) {
        ML_Gen_Smoother_Jacobi(ml , coarsest_level, ML_BOTH, nsmooth,.5);
   }
   else if (ML_strcmp(context->coarse_solve,"Metis") == 0) {
         if (blocks)    ML_free(blocks);
         if (block_pde) ML_free(block_pde);
         nblocks = 250;
         ML_Gen_Blocks_Metis(ml, coarsest_level, &nblocks, &blocks);
         ML_Gen_Smoother_VBlockSymGaussSeidel(ml , coarsest_level, ML_BOTH, 
                                              nsmooth,1., nblocks, blocks);
   }
   else if (ML_strcmp(context->coarse_solve,"SuperLU") == 0) {
      ML_Gen_CoarseSolverSuperLU( ml, coarsest_level);
   }
   else if (ML_strcmp(context->coarse_solve,"Amesos") == 0) {
      ML_Gen_Smoother_Amesos(ml,coarsest_level,ML_AMESOS_KLU,-1, 0.0);
   }
   else {
         printf("unknown coarse grid solver %s\n",context->coarse_solve);
         exit(1);
   }
		
   ML_Gen_Solver(ml, ML_MGV, 0, coarsest_level); 

   AZ_defaults(options, params);
	
   if (ML_strcmp(context->krylov,"Cg") == 0) {
      options[AZ_solver]   = AZ_cg;
   }
   else if (ML_strcmp(context->krylov,"Bicgstab") == 0) {
      options[AZ_solver]   = AZ_bicgstab;
   }
   else if (ML_strcmp(context->krylov,"Tfqmr") == 0) {
      options[AZ_solver]   = AZ_tfqmr;
   }
   else if (ML_strcmp(context->krylov,"Gmres") == 0) {
      options[AZ_solver]   = AZ_gmres;
   }
   else {
      printf("unknown krylov method %s\n",context->krylov);
   }
   if (blocks)            ML_free(blocks);
   if (block_pde)         ML_free(block_pde);
   options[AZ_scaling]  = AZ_none;
   options[AZ_precond]  = AZ_user_precond;
   options[AZ_conv]     = AZ_r0;
   options[AZ_output]   = 1;
   options[AZ_max_iter] = context->max_outer_its;
   options[AZ_poly_ord] = 5;
   options[AZ_kspace]   = 130;
   params[AZ_tol]       = context->tol;
   options[AZ_output]   = context->output;
   ML_free(context);
	
   AZ_set_ML_preconditioner(&Pmat, Amat, ml, options); 
   setup_time = AZ_second() - start_time;
	
   xxx = (double *) malloc( leng*sizeof(double));

   for (i = 0; i < leng; i++) xxx[i] = 0.0;
	

   /* Set x */
   /*
   there is no initguess supplied with these examples for the moment....
   */
   fp = fopen("initguessfile","r");
   if (fp != NULL) {
      fclose(fp);
      if (proc_config[AZ_node]== 0) printf("reading initial guess from file\n");
      AZ_input_msr_matrix("data_initguess.txt", update, &xxx, &garbage, N_update, 
                          proc_config);

      options[AZ_conv] = AZ_expected_values;
   }
   else if (proc_config[AZ_node]== 0) printf("taking 0 initial guess \n");

   AZ_reorder_vec(xxx, data_org, update_index, NULL);

   /* if Dirichlet BC ... put the answer in */

   for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) {
      if ( (val[i] > .99999999) && (val[i] < 1.0000001))
         xxx[i] = rhs[i];      
   }

   fp = fopen("AZ_no_multilevel.dat","r");
   scaling = AZ_scaling_create();
   start_time = AZ_second();
   if (fp != NULL) {
      fclose(fp);
      options[AZ_precond] = AZ_none;
      options[AZ_scaling] = AZ_sym_diag;
      options[AZ_ignore_scaling] = AZ_TRUE;

      options[AZ_keep_info] = 1;
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); 

/*
      options[AZ_pre_calc] = AZ_reuse;
      options[AZ_conv] = AZ_expected_values;
      if (proc_config[AZ_node] == 0) 
              printf("\n-------- Second solve with improved convergence test -----\n");
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); 
      if (proc_config[AZ_node] == 0) 
              printf("\n-------- Third solve with improved convergence test -----\n");
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); 
*/
   }
   else {
      options[AZ_keep_info] = 1;
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); 
      options[AZ_pre_calc] = AZ_reuse;
      options[AZ_conv] = AZ_expected_values;
/*
      if (proc_config[AZ_node] == 0) 
              printf("\n-------- Second solve with improved convergence test -----\n");
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); 
      if (proc_config[AZ_node] == 0) 
              printf("\n-------- Third solve with improved convergence test -----\n");
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); 
*/
   }
   solve_time = AZ_second() - start_time;

   if (proc_config[AZ_node] == 0) 
      printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time);

   if (proc_config[AZ_node] == 0) 
     printf("Printing out a few entries of the solution ...\n");

   for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++)
     if (update[j] == 7) {printf("solution(gid = %d) = %10.4e\n",
			      update[j],xxx[update_index[j]]); fflush(stdout);}
   j = AZ_gsum_int(7, proc_config); /* sync processors */
   for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++)
     if (update[j] == 23) {printf("solution(gid = %d) = %10.4e\n",
			      update[j],xxx[update_index[j]]); fflush(stdout);}
   j = AZ_gsum_int(7, proc_config); /* sync processors */
   for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++)
     if (update[j] == 47) {printf("solution(gid = %d) = %10.4e\n",
			      update[j],xxx[update_index[j]]); fflush(stdout);}
   j = AZ_gsum_int(7, proc_config); /* sync processors */
   for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++)
     if (update[j] == 101) {printf("solution(gid = %d) = %10.4e\n",
			      update[j],xxx[update_index[j]]); fflush(stdout);}
   j = AZ_gsum_int(7, proc_config); /* sync processors */
   for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++)
     if (update[j] == 171) {printf("solution(gid = %d) = %10.4e\n",
			      update[j],xxx[update_index[j]]); fflush(stdout);}

   ML_Aggregate_Destroy(&ag);
   ML_Destroy(&ml);
   AZ_free((void *) Amat->data_org);
   AZ_free((void *) Amat->val);
   AZ_free((void *) Amat->bindx);
   AZ_free((void *) update);
   AZ_free((void *) external);
   AZ_free((void *) extern_index);
   AZ_free((void *) update_index);
   AZ_scaling_destroy(&scaling);
   if (Amat  != NULL) AZ_matrix_destroy(&Amat);
   if (Pmat  != NULL) AZ_precond_destroy(&Pmat);
   free(xxx);
   free(rhs);

#ifdef ML_MPI
  MPI_Finalize();
#endif
	
  return 0;
	
}
Beispiel #24
0
   char    instring[100], jobz, jobz2;
   FILE    *fp;
#endif

   Amat     = curr->Amat;
   Rmat     = curr->Rmat;
   pre      = curr->pre_smoother;
   post     = curr->post_smoother;
   csolve   = curr->csolve;
   lengf    = Amat->outvec_leng;

   /* ------------------------------------------------------------ */
   /* first do the normalization                                   */
   /* ------------------------------------------------------------ */

   rhss = (double *) ML_allocate( lengf * sizeof(double) );
   ML_DVector_GetDataPtr(curr->Amat_Normalization, &normalscales) ;
   for ( i = 0; i < lengf; i++ ) rhss[i] = rhs[i];

#ifdef ML_ANALYSIS
   if ( comm->ML_nprocs == 1 && curr->Pmat->to == NULL && lengf < 1000 )
   {
      fp = fopen("mlmatlab.m", "w");
      Nrows = lengf;
      fprintf(fp, "A = sparse(%d,%d);\n", Nrows, Nrows);
      allocated_space = 100;
      cols = (int    *) ML_allocate(allocated_space*sizeof(int   ));
      vals = (double *) ML_allocate(allocated_space*sizeof(double));
      for (i = 0; i < lengf; i++) {
         while(ML_Operator_Getrow(Amat,1,&i,allocated_space,cols,vals,&ncols)==0)
         {
int ML_Epetra::MultiLevelPreconditioner::SetupCoordinates()
{
  ML*     ml_ptr = 0;
  int     NumDimensions;
  double* in_x_coord = 0;
  double* in_y_coord = 0;
  double* in_z_coord = 0;

  // Check first for node coordinates, then for edge coordinates
  for (int ii=0; ii<2; ii++)
  {
    switch (ii) {
    case 0:
      ml_ptr = ml_;
      //??else ml_ptr = ml_;
      in_x_coord = List_.get("x-coordinates", (double *)0);
      in_y_coord = List_.get("y-coordinates", (double *)0);
      in_z_coord = List_.get("z-coordinates", (double *)0);
      break;

    case 1:
      if (AMGSolver_ == ML_MAXWELL) {
        ml_ptr = ml_nodes_;
        in_x_coord = List_.get("node: x-coordinates", (double *)0);
        in_y_coord = List_.get("node: y-coordinates", (double *)0);
        in_z_coord = List_.get("node: z-coordinates", (double *)0);
      }
      else {
        in_x_coord = NULL;
        in_y_coord = NULL;
        in_z_coord = NULL;
      }
      break;
    }

    NumDimensions  = 0;

    if (!(in_x_coord == 0 && in_y_coord == 0 && in_z_coord == 0))
    {
      ML_Aggregate_Viz_Stats *grid_info =
        (ML_Aggregate_Viz_Stats *) ml_ptr->Grid[LevelID_[0]].Grid;
      ML_Operator* AAA = &(ml_ptr->Amat[LevelID_[0]]);

      int n = AAA->invec_leng, Nghost = 0;

      if (AAA->getrow->pre_comm)
      {
        if (AAA->getrow->pre_comm->total_rcv_length <= 0)
          ML_CommInfoOP_Compute_TotalRcvLength(AAA->getrow->pre_comm);
        Nghost = AAA->getrow->pre_comm->total_rcv_length;
      }

      std::vector<double> tmp(Nghost + n);
      for (int i = 0 ; i < Nghost + n ; ++i)
        tmp[i] = 0.0;
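
      /* Coordinates are supplied once per node rather than once per    */
      /* equation, so convert the equation counts to node counts.       */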

      n /= NumPDEEqns_;
      Nghost /= NumPDEEqns_;

      if (in_x_coord)
      {
        NumDimensions++;
        double* x_coord = (double *) ML_allocate(sizeof(double) * (Nghost+n));

        for (int i = 0 ; i < n ; ++i)
          tmp[i * NumPDEEqns_] = in_x_coord[i];

        ML_exchange_bdry(&tmp[0],AAA->getrow->pre_comm, NumPDEEqns_ * n,
                         AAA->comm, ML_OVERWRITE,NULL);

        for (int i = 0 ; i < n + Nghost ; ++i)
          x_coord[i] = tmp[i * NumPDEEqns_];

        grid_info->x = x_coord;
      }

      if (in_y_coord)
      {
        NumDimensions++;
        double* y_coord = (double *) ML_allocate(sizeof(double) * (Nghost+n));

        for (int i = 0 ; i < n ; ++i)
          tmp[i * NumPDEEqns_] = in_y_coord[i];

        ML_exchange_bdry(&tmp[0],AAA->getrow->pre_comm, NumPDEEqns_ * n,
                         AAA->comm, ML_OVERWRITE,NULL);

        for (int i = 0 ; i < n + Nghost ; ++i)
          y_coord[i] = tmp[i * NumPDEEqns_];

        grid_info->y = y_coord;
      }

      if (in_z_coord)
      {
        NumDimensions++;
        double* z_coord = (double *) ML_allocate(sizeof(double) * (Nghost+n));

        for (int i = 0 ; i < n ; ++i)
          tmp[i * NumPDEEqns_] = in_z_coord[i];

        ML_exchange_bdry(&tmp[0],AAA->getrow->pre_comm, NumPDEEqns_ * n,
                         AAA->comm, ML_OVERWRITE,NULL);

        for (int i = 0 ; i < n + Nghost ; ++i)
          z_coord[i] = tmp[i * NumPDEEqns_];

        grid_info->z = z_coord;
      }

      grid_info->Ndim = NumDimensions;
    } // if (!(in_x_coord == 0 && in_y_coord == 0 && in_z_coord == 0))
  } //for (int ii=0; ii<2; ii++)

  return(0);
}
ML_Operator *user_T_build(struct user_partition *Edge_Partition,
		      struct user_partition *Node_Partition,
		      ML_Operator *Kn_mat, ML_Comm *comm)
{

  int nx, i, ii, jj, horv, Ncols, Nexterns;
  int *Tmat_bindx;
  double *Tmat_val;
  ML_Operator *Tmat;
  struct ML_CSR_MSRdata *csr_data;
  struct aztec_context *aztec_context;
  int global_id;
  int Nlocal_nodes, Nlocal_edges; 
  int nz_ptr;

  Nlocal_nodes = Node_Partition->Nlocal;
  Nlocal_edges = Edge_Partition->Nlocal;

  nx = (int) sqrt( ((double) Node_Partition->Nglobal) + .00001);

  Tmat_bindx = (int    *) malloc((3*Nlocal_edges+1)*sizeof(int));
  Tmat_val   = (double *) malloc((3*Nlocal_edges+1)*sizeof(double));
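
  /* Build T, the discrete gradient (edge-node incidence) matrix, in     */
  /* MSR format: each edge row receives a -1 at one endpoint node and a  */
  /* +1 at the other (boundary edges may keep only a single entry).      */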

  Tmat_bindx[0] = Nlocal_edges + 1;
  for (i = 0; i < Nlocal_edges; i++) {
    global_id = (Edge_Partition->my_global_ids)[i];
    Tmat_val[i] = 0.0;

    invindex(global_id, &ii, &jj, nx, &horv);
    nz_ptr = Tmat_bindx[i];


    ii--;

    if (horv == HORIZONTAL) {
      if(ii != -1) {
	Tmat_bindx[nz_ptr] = southwest(ii,jj,nx); Tmat_val[nz_ptr++] = -1.;
      }
      Tmat_bindx[nz_ptr]   = southeast(ii,jj,nx); Tmat_val[nz_ptr++] =  1.; 
    }
    else {
      if (ii == -1) ii = nx-1;
      Tmat_bindx[nz_ptr] = northwest(ii,jj,nx); Tmat_val[nz_ptr++] = -1.;
      if (jj != 0) {
	Tmat_bindx[nz_ptr] = southwest(ii,jj,nx); Tmat_val[nz_ptr++] =  1.;}
    }
    Tmat_bindx[i+1] = nz_ptr;

  }

  csr_data = (struct ML_CSR_MSRdata *) ML_allocate(sizeof(struct ML_CSR_MSRdata));
  csr_data->columns = Tmat_bindx;
  csr_data->values  = Tmat_val;
  ML_MSR2CSR(csr_data, Nlocal_edges, &Ncols);
  aztec_context = (struct aztec_context *) Kn_mat->data;
  Nexterns = (aztec_context->Amat->data_org)[AZ_N_external];

  AZ_Tmat_transform2ml(Nexterns,   Node_Partition->needed_external_ids,
		       reordered_node_externs,
		       Tmat_bindx, Tmat_val, csr_data->rowptr, Nlocal_nodes,
		       Node_Partition->my_global_ids,
		       comm, Nlocal_edges, &Tmat);
  ML_free(csr_data);
  Tmat->data_destroy = ML_CSR_MSRdata_Destroy;
  ML_CommInfoOP_Clone(&(Tmat->getrow->pre_comm), Kn_mat->getrow->pre_comm);

  return(Tmat);
}