Example #1
void init_matrix_vector_structures(int proc_config[], int *update_index[], int
                                   *update[], int  *data_org[], int *external[],
                                   int *extern_index[], int input_option, double
                                   *val[], int *bindx[], int *indx[], int
                                   *bpntr[], int *rpntr[], int *cpntr[])


/*
 * Read in the points to be updated on this processor, create the global
 * distributed form of the application matrix, and then convert it to a
 * local distributed form for AZTEC kernels. Along the way, initialize the
 * following quantities:
 *     update_index[], update[], data_org[], val[], bindx[], bpntr[], cpntr[],
 *     rpntr[], indx[], external[], extern_index[].
 *
 * Author: Ray Tuminaro, Div 1422, SNL
 * Date:   3/15/95
 *
 * Parameters
 *
 *    proc_config    ==      On input, processor information:
 *                              proc_config[AZ_node] = name of this processor
 *                              proc_config[AZ_N_procs] = # of processors used
 *    update         ==      On output, list of pts to be updated on this node
 *    val,bindx      ==      On output, local distributed form of arrays
 *                           holding matrix values
 *    external       ==      On output, list of external vector elements
 *    update_index   ==      On output, ordering of update and external
 *    extern_index   ==      locally on this processor. For example
 *                           'update_index[i]' gives the index location
 *                           of the block which has the global index
 *                           'update[i]'.
 *    data_org       ==      On output, indicates how the data is set out on
 *                           this node. For example, data_org[] contains
 *                           information on how many unknowns are internal,
 *                           external, and border unknowns as well as which
 *                           points need to be communicated. See User's Guide
 *                           for more details.
 *    input_option   ==      Indicates how update[] will be initialized.
 *                           = 0, linear decomposition
 *                           = 1, points read from file 'update'.
 *                           = 2, box decomposition
 *                           See AZ_read_update() comments for more details.
 *
 *      The default finite difference MSR problem corresponds to setting up
 *      a series of uncoupled 3D Poisson equations on a cube.
 *      To solve other problems, the call 'add_row_3D(...)' in
 *      'create_msr_matrix()' can be changed to 'add_row_5pt()' or
 *      'add_row_9pt()'.
 */

{

  int    N_update;            /* Number of pts updated on this processor     */
  int    MSRorVBR;
  int    chunks;
  int    blk_size, num_blk_cols, num_blk_rows, size, kk, convert_to_vbr = 0;
  double *val2;
  int    *bindx2;


  MSRorVBR = AZ_MSR_MATRIX;
  if (application == 1) MSRorVBR = AZ_VBR_MATRIX;

  chunks = num_PDE_eqns;
  if (MSRorVBR == AZ_VBR_MATRIX) chunks = 1;

  /* initialize the list of global indices. NOTE: the list of global */
  /* indices must be in ascending order so that subsequent calls to  */
  /* AZ_find_index() will function properly. */

  AZ_read_update(&N_update, update, proc_config, N_grid_pts, chunks,
                 input_option);

  /* create the matrix: each processor creates only the      */
  /* rows appearing in update[] ... however this row is      */
  /* created as if it were on a serial machine (i.e. using   */
  /* the global column numbers)                              */

  if (application == 1)
    create_vbr_matrix(*update, val, indx, N_update, rpntr, bpntr, bindx);
  else {
    *indx = NULL; *bpntr = NULL; *rpntr = NULL; *cpntr = NULL;

    if (application == 0) create_msr_matrix(*update, val, bindx, N_update);
    if (application == 2) create_fe_matrix(*update, proc_config[AZ_node],
                                           bindx, val, N_update);
    if (application == 3) { 
        AZ_read_msr_matrix(*update, val, bindx, N_update, proc_config);
    }
  }

  /* convert matrix to a distributed parallel matrix */

  AZ_transform(proc_config, external, *bindx, *val, *update, update_index,
               extern_index, data_org, N_update, *indx, *bpntr, *rpntr, cpntr,
               MSRorVBR);
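  /* Optionally convert the transformed MSR matrix to VBR format. This is  */
  /* only done for matrices read from a file (application == 3) and when   */
  /* convert_to_vbr is set; the block size is read on processor 0 and then */
  /* broadcast to the other processors.                                    */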

  if ( (convert_to_vbr == 1) && (application == 3) ) {
     if (proc_config[AZ_node] == 0 ) {
	 printf("enter the block size\n");
	 scanf("%d",&blk_size);
     }
     AZ_broadcast((char *) &blk_size,  sizeof(int), proc_config, AZ_PACK);
     AZ_broadcast((char *) NULL         , 0          , proc_config, AZ_SEND);

     if ( N_update%blk_size != 0 ) {
        (void) fprintf(stderr," The number of rows per processor must be a multiple of the block size.\n");
        exit(-1);
     }

     num_blk_rows = N_update/blk_size;
     num_blk_cols = ( (*data_org)[AZ_N_external] + N_update)/blk_size;
     *cpntr = (int *) AZ_allocate( (num_blk_cols+2)*sizeof(int));
     *rpntr = (int *) AZ_allocate( (num_blk_cols+2)*sizeof(int));
     *bpntr = (int *) AZ_allocate( (num_blk_cols+2)*sizeof(int));
     size   = 20*(num_blk_cols+2);
     *indx  =  (int *) AZ_allocate(size*sizeof(int));
     bindx2 = *bindx;
     val2   = *val;
     *bindx = (int *) AZ_allocate(size*sizeof(int));
     *val   =  (double *) AZ_allocate(size*blk_size*blk_size*sizeof(double));
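     /* cpntr is loaded with the size of each block column; AZ_msr2vbr()   */
     /* uses these sizes when converting the MSR arrays (bindx2, val2)     */
     /* into the newly allocated VBR arrays.                               */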

     for (kk = 0 ; kk < num_blk_cols ; kk++ ) (*cpntr)[kk] = blk_size;
     AZ_msr2vbr(*val,*indx,*rpntr,*cpntr,*bpntr,*bindx,bindx2,val2,
		num_blk_rows,num_blk_cols,size,size*blk_size*blk_size,blk_size);
     MSRorVBR = AZ_VBR_MATRIX;
     N_update /= blk_size;
     num_PDE_eqns = blk_size; 
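     /* collapse the point-wise update[] and external[] lists into block   */
     /* (row) indices, since each block row now groups blk_size points     */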
     for (kk = 0 ; kk < N_update ; kk++ )
           (*update)[kk] = (*update)[blk_size*kk]/blk_size;
     for (kk = 0 ; kk < (*data_org)[AZ_N_external] ; kk++ ) 
           (*external)[kk] = (*external)[blk_size*kk]/blk_size;

     (*data_org)[AZ_matrix_type] = AZ_VBR_MATRIX;
     (*data_org)[AZ_N_int_blk ] /= blk_size;
     (*data_org)[AZ_N_bord_blk] /= blk_size;
     (*data_org)[AZ_N_ext_blk ] /= blk_size;
     AZ_free(bindx2);  AZ_free(val2);
  }


} /* init_matrix_vector_structures */
Example #2
int main(int argc, char *argv[])
{

  /* See Aztec User's Guide for the variables that follow:         */
  int    proc_config[AZ_PROC_SIZE];/* Processor information.                */
  int    N_update;                 /* # of unknowns updated on this node    */
  int    *update;                  /* vector elements updated on this node  */

  int    *data_orgA;               /* Array to specify data layout          */
  int    *externalA;               /* vector elements needed by this node.  */
  int    *update_indexA;           /* ordering of update[] and external[]   */
  int    *extern_indexA;           /* locally on this processor.            */
  int    *bindxA;                  /* Sparse curl operator matrix is stored */
  double *valA;                    /* in these MSR arrays.                  */
  AZ_MATRIX *mat_curl_edge;        /* curl operator matrix                  */

  int    *data_orgB;                /* Array to specify data layout          */
  int    *externalB;                /* vector elements needed by this node.  */
  int    *update_indexB;            /* ordering of update[] and external[]   */
  int    *extern_indexB;            /* locally on this processor.            */
  int    *bindxB;                   /* Sparse curl operator matrix is stored */
  double *valB;                     /* in these MSR arrays.                  */
  AZ_MATRIX *mat_curl_face;         /* curl operator matrix                  */

  int *bc_indx;
  int n_bc;

  double *efield;
  double *bfield;
  double *epsilon;
  double *tmp_vec;
  double *tmp_vec2;

  int    i, nrow, x, y, z;
  int k, t;
  long startTime, endTime;
  int myrank;
  int vec_len;


  /* get number of processors and the name of this processor */
#ifdef AZ_MPI
  MPI_Init(&argc,&argv);
  AZ_set_proc_config(proc_config, MPI_COMM_WORLD);
  MPI_Comm_rank(MPI_COMM_WORLD, &myrank);
#else
  myrank = 0;
  AZ_set_proc_config(proc_config, AZ_NOT_MPI);
#endif

  nrow = ncomp * nx * ny * nz;  /* overall number of matrix rows  */

  // Define partitioning:  matrix rows (ascending order) owned by this node
  // Here it is done automatically, but it can also be specified by hand
  AZ_read_update(&N_update, &update, proc_config, nrow, 1, AZ_linear);


  // In the following we set up the matrix for the edge centered curl operator
  // All the steps are described in detail in the AZTEC manual.
  // first: allocate space for the first matrix.
  bindxA = (int    *) malloc((N_update*MAX_NZ_ROW+1)*sizeof(int));
  valA   = (double *) malloc((N_update*MAX_NZ_ROW+1)*sizeof(double));
  if (valA == NULL) { perror("Error: Not enough space to create matrix"); exit(1); }
  // Initialize the index for the first off diagonal element
  bindxA[0] = N_update+1;
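  // (MSR storage: bindxA[0..N_update] are row pointers and the column
  //  indices of the off-diagonal entries start at position N_update+1)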

  // Create the matrix row by row. Each processor creates only rows appearing
  // in update[] (using global col. numbers).
  for (i = 0; i < N_update; i++)
    create_curl_matrix_row_edge(update[i], i, valA, bindxA);

  // convert matrix to a local distributed matrix
  AZ_transform(proc_config, &externalA, bindxA, valA, update, &update_indexA,
               &extern_indexA, &data_orgA, N_update, NULL, NULL, NULL, NULL,
               AZ_MSR_MATRIX);

  // convert the matrix arrays into a matrix structure, used in the
  // matrix vector multiplication
  mat_curl_edge = AZ_matrix_create(data_orgA[AZ_N_internal] + data_orgA[AZ_N_border]);
  AZ_set_MSR(mat_curl_edge, bindxA, valA, data_orgA, 0, NULL, AZ_LOCAL);

  // at this point the edge centered curl matrix is completed.

  // In the following we set up the matrix for the face centered curl operator
  // All the steps are described in detail in the AZTEC manual.
  // first: allocate space for the first matrix.
  bindxB = (int    *) malloc((N_update*MAX_NZ_ROW+1)*sizeof(int));
  valB   = (double *) malloc((N_update*MAX_NZ_ROW+1)*sizeof(double));
  if (valB == NULL) { perror("Error: Not enough space to create matrix"); exit(1); }

  // Initialize the index for the first off diagonal element
  bindxB[0] = N_update+1;

  // Create the matrix row by row. Each processor creates only rows appearing
  // in update[] (using global col. numbers).
  for (i = 0; i < N_update; i++)
      create_curl_matrix_row_face(update[i], i, valB, bindxB);

  // convert matrix to a local distributed matrix
  AZ_transform(proc_config, &externalB, bindxB, valB, update, &update_indexB,
               &extern_indexB, &data_orgB, N_update, NULL, NULL, NULL, NULL,
               AZ_MSR_MATRIX);
  // convert the matrix arrays into a matrix structure, used in the
  // matrix vector multiplication
  mat_curl_face = AZ_matrix_create(data_orgB[AZ_N_internal] + data_orgB[AZ_N_border]);
  AZ_set_MSR(mat_curl_face, bindxB, valB, data_orgB, 0, NULL, AZ_LOCAL);

  // at this point the face centered curl matrix is completed.


  //  allocate memory for the fields and a temporary vector
  vec_len = N_update + data_orgA[AZ_N_external];
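  // (vec_len = locally updated entries plus the ghost/external entries
  //  needed by the matrix-vector products)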
  efield = (double *) malloc(vec_len*sizeof(double));
  bfield = (double *) malloc(vec_len*sizeof(double));
  epsilon = (double *) malloc(vec_len*sizeof(double));
  tmp_vec = (double *) malloc(vec_len*sizeof(double));
  tmp_vec2 = (double *) malloc(vec_len*sizeof(double));

  // setup the boundary condition. We will get an array that tells us
  // which positions need to be updated and where the result needs
  // to be stored in the E field.
  setup_bc(update, update_indexB, N_update, &bc_indx, &n_bc);

  // initialize the field vectors
  for(k = 0; k < vec_len; k++){
     efield[k] = 0.;
     bfield[k] = 0.;
     epsilon[k] = 1.;
     tmp_vec[k] = 0.;
  }

  // initialize the dielectric structure. Ugly hard-coded stuff,
  // needs to be cleaned out...
  for(y=45; y<55; y++){
   for(x = y; x<100; x++)
      epsilon[compZ + pos_to_row(x, y, 0)] = 0.95;
  }	
  // reorder the dielectric vector in order to align with the B field
  AZ_reorder_vec(epsilon, data_orgA, update_indexA, NULL);


  printf("Begin iteration \n");

  // just some timing ...
  startTime = currentTimeMillis();

  // *******************
  // begin of the time stepping loop
  // *******************

  for( t = 0; t < nsteps; t++){

    // first we do the e field update

    // convert the B field to the H field

    for(k = 0 ; k < vec_len; k++)
      bfield[k] *= epsilon[k];

    // setup the initial condition
    for( k = 0; k < n_bc; k++){
      x = bc_indx[4*k];
      y = bc_indx[4*k+1];
      z = bc_indx[4*k+2];
      efield[bc_indx[4*k+3]] =
           sin((double) y * 5. * 3.14159 / (double) ny) *
           sin(omega * dt * (double) (t + 1));
    }

    //  E field update:
    //  tmp_vec = Curl_Op * bfield
    //  efield = efield +  c^2 * dt * tmp_vec
    AZ_MSR_matvec_mult( bfield, tmp_vec, mat_curl_edge, proc_config);

    // reorder the result in tmp_vec so that it aligns with the
    // decomposition of the E field
    AZ_invorder_vec(tmp_vec, data_orgA, update_indexA, NULL, tmp_vec2);
    AZ_reorder_vec(tmp_vec2, data_orgB, update_indexB, NULL);

    // update the efield
    for(k = 0 ; k < N_update; k++)
      efield[k] = efield[k] + c2 * tmp_vec2[k] * dt;

    // bfield update :
    // tmp_vec = DualCurl_Op * efield
    // bfield = bfield - tmp_vec * dt
    AZ_MSR_matvec_mult( efield, tmp_vec, mat_curl_face, proc_config);

    // reorder the result so that it fits the decomposition of the bfield
    AZ_invorder_vec(tmp_vec, data_orgB, update_indexB, NULL, tmp_vec2);
    AZ_reorder_vec(tmp_vec2, data_orgA, update_indexA, NULL);

    // update the b field
    for(k = 0;  k < N_update; k++)
	  bfield[k] = bfield[k] - tmp_vec2[k] * dt;

    if(myrank == 0)
      printf("Taking step %d at time %g\n", t,
		       (double) (currentTimeMillis() - startTime) / 1000.);
  }
  // ******************
  // end of timestepping loop
  // *****************

  endTime = currentTimeMillis();
  printf("After iteration: %g\n", (double)(endTime - startTime) / 1000. );

#if 1
  system("rm efield.txt bfield.txt");

  // dump field data: efield
  AZ_invorder_vec(efield, data_orgB, update_indexB, NULL, tmp_vec);
  write_file("efield.txt", tmp_vec, N_update);

  // dump field data: bfield
  AZ_invorder_vec(bfield, data_orgA, update_indexA, NULL, tmp_vec);
  write_file("bfield.txt", tmp_vec, N_update);
#endif

  /* Free allocated memory */
  AZ_matrix_destroy( &mat_curl_edge);
  free((void *) update);   free((void *) update_indexA);
  free((void *) externalA); free((void *) extern_indexA);
  free((void *) bindxA);    free((void *) valA);  free((void *) data_orgA);

  AZ_matrix_destroy( &mat_curl_face);
  free((void *) externalB); free((void *) extern_indexB);
  free((void *) bindxB);    free((void *) valB);  free((void *) data_orgB);

  free((void *) efield);    free((void *) bfield);   free((void *) epsilon);
  free((void *) tmp_vec);   free((void *) tmp_vec2);


#ifdef AZ_MPI
  MPI_Finalize();
#endif
  return 0;

}
/*#define DEBUG */
void distrib_msr_matrix(int *proc_config, int N_global,
	      int *n_nonzeros, int *N_update, int **update,
	      double **val, int **bindx,
	      double **x, double **b, double **xexact)

{
  int i, n_entries, N_columns, n_global_nonzeros;
  int ii, j, row, have_xexact = 0 ;
  int kk = 0;
  int max_ii = 0, max_jj = 0;
  int ione = 1;
  double value;
  double *cnt;
  int *pntr, *bindx1, *pntr1;
  double *val1, *b1, *x1, *xexact1;
#ifdef DEBUG
  printf("Processor %d of %d entering distrib_matrix.\n",
	 proc_config[AZ_node],proc_config[AZ_N_procs]) ;
#endif

  /*************** Distribute global matrix to all processors ************/

  if(proc_config[AZ_node] == 0)
    {
      if ((*xexact) != NULL) have_xexact = 1;
#ifdef DEBUG
      printf("Broadcasting exact solution\n");
#endif
    }

  if(proc_config[AZ_N_procs]  > 1)
    { 

      AZ_broadcast((char *) &N_global,        sizeof(int), proc_config, AZ_PACK);
      AZ_broadcast((char *) n_nonzeros, sizeof(int), proc_config, AZ_PACK);
      AZ_broadcast((char *) &have_xexact, sizeof(int), proc_config, AZ_PACK);
      AZ_broadcast(NULL, 0, proc_config, AZ_SEND);

      if(proc_config[AZ_node] != 0)
	{
	  (*bindx) = (int   *) calloc(*n_nonzeros+1,sizeof(int)) ;
	  (*val) = (double *) calloc(*n_nonzeros+1,sizeof(double)) ;
	}

      AZ_broadcast((char *) (*bindx), sizeof(int)   *(*n_nonzeros+1), 
		   proc_config, AZ_PACK);
      AZ_broadcast(NULL, 0, proc_config, AZ_SEND);
      AZ_broadcast((char *) (*val),  sizeof(double)*(*n_nonzeros+1), 
		   proc_config, AZ_PACK);
      AZ_broadcast(NULL, 0, proc_config, AZ_SEND);

#ifdef DEBUG
      printf("Processor %d of %d done with matrix broadcast.\n",
	     proc_config[AZ_node],proc_config[AZ_N_procs]) ;
#endif
 
      /* Set rhs and initialize guess */
      if(proc_config[AZ_node] != 0)
	{
	  (*b) = (double *) calloc(N_global,sizeof(double)) ;
	  (*x) = (double *) calloc(N_global,sizeof(double)) ;
	  if (have_xexact)
	  (*xexact) =   (double *) calloc(N_global,sizeof(double)) ;
	}

      AZ_broadcast((char *) (*x), sizeof(double)*(N_global), proc_config, AZ_PACK);
      AZ_broadcast((char *) (*b), sizeof(double)*(N_global), proc_config, AZ_PACK);
      if (have_xexact)
	AZ_broadcast((char *) 
		     (*xexact), sizeof(double)*(N_global), proc_config, AZ_PACK);
      AZ_broadcast(NULL, 0, proc_config, AZ_SEND);
#ifdef DEBUG
      printf("Processor %d of %d done with rhs/guess broadcast.\n",
	     proc_config[AZ_node],proc_config[AZ_N_procs]) ;
#endif

    }

  /********************** Generate update map  *************************/

  AZ_read_update(N_update, update, proc_config, N_global,
           1, AZ_linear) ;
  
  printf("Processor %d of %d has %d rows of %d total rows.\n",
	 proc_config[AZ_node],proc_config[AZ_N_procs],*N_update,N_global) ;

  /*************** Construct local matrix from global matrix ************/

  /* The local matrix is a copy of the rows assigned to this processor.
     It is stored in MSR format and still has global indices (AZ_transform
     will complete the conversion to local indices).
  */

  if(proc_config[AZ_N_procs]  > 1)
    { 
      n_global_nonzeros = *n_nonzeros;
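      /* count the nonzeros stored locally: one diagonal entry per updated
         row plus the off-diagonal entries of each row owned here          */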

      *n_nonzeros = *N_update;
      
      for (i=0; i<*N_update; i++)
	*n_nonzeros += (*bindx)[(*update)[i]+1] - (*bindx)[(*update)[i]];

      printf("Processor %d of %d has %d nonzeros of %d total nonzeros.\n",
	     proc_config[AZ_node],proc_config[AZ_N_procs],
	     *n_nonzeros,n_global_nonzeros) ;

#ifdef DEBUG
      { double sum1 = 0.0;
      for (i=0;i<N_global; i++) sum1 += (*b)[i];

      printf("Processor %d of %d has sum of b = %12.4g.\n",
	     proc_config[AZ_node],proc_config[AZ_N_procs],sum1) ;
      }
#endif /* DEBUG */

      /* Allocate memory for local matrix */

      bindx1 = (int   *) calloc(*n_nonzeros+1,sizeof(int)) ;
      val1 = (double *) calloc(*n_nonzeros+1,sizeof(double)) ;
      b1 =   (double *) calloc(*N_update,sizeof(double)) ;
      x1 =   (double *) calloc(*N_update,sizeof(double)) ;
      if (have_xexact)
      xexact1 =   (double *) calloc(*N_update,sizeof(double)) ;
     
      bindx1[0] = *N_update+1;
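      /* (MSR convention: bindx1[0..*N_update] are row pointers and the
          off-diagonal entries are appended starting at *N_update+1)       */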
      
      for (i=0; i<*N_update; i++)
	{
	  row = (*update)[i];
	  b1[i] = (*b)[row];
	  x1[i] = (*x)[row];
	  if (have_xexact) xexact1[i] = (*xexact)[row];
	  val1[i] = (*val)[row];
	  bindx1[i+1] = bindx1[i];

#ifdef DEBUG	  
	  printf("Proc %d of %d: Global row = %d: Local row = %d: 
                  b = %12.4g: x = %12.4g: bindx = %d: val = %12.4g \n",
		 proc_config[AZ_node],proc_config[AZ_N_procs], 
		 row, i, b1[i], x1[i], bindx1[i], val1[i]) ;
#endif

	  for (j = (*bindx)[row]; j < (*bindx)[row+1]; j++)
	    {
	      val1[  bindx1 [i+1] ] = (*val)[j];
	      bindx1[bindx1 [i+1] ] = (*bindx)[j];
	      bindx1[i+1] ++;
	    }
	}

#ifdef DEBUG
      printf("Processor %d of %d done with extracting local operators.\n",
	     proc_config[AZ_node],proc_config[AZ_N_procs]) ;

      if (have_xexact)
	{
	  printf(
     "The residual using MSR format and exact solution on processor %d is %12.4g\n",
	      proc_config[AZ_node],
	      smsrres (*N_update, N_global, val1, bindx1, xexact1, (*xexact), b1));
	}
  
#endif
      /* Release memory for global matrix, rhs and solution */
      
      free ((void *) (*val));
      free ((void *) (*bindx));
      free ((void *) (*b));
      free ((void *) (*x));
      if (have_xexact) free((void *) *xexact);

      /* Return local matrix through same pointers. */
      
      *val = val1;
      *bindx = bindx1;
      *b = b1;
      *x = x1;
      if (have_xexact) *xexact = xexact1;

    }
Example #4
int main(int argc, char *argv[])
{
	int num_PDE_eqns=5, N_levels=3;
    /* int nsmooth=1; */

	int    leng, level, N_grid_pts, coarsest_level;

  /* See Aztec User's Guide for more information on the */
  /* variables that follow.                             */

  int    proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE];
  double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE];

  /* data structure for matrix corresponding to the fine grid */

  int    *data_org = NULL, *update = NULL, *external = NULL;
  int    *update_index = NULL, *extern_index = NULL;
  int    *cpntr = NULL;
  int    *bindx = NULL, N_update, iii;
  double *val = NULL;
	double *xxx, *rhs;

	AZ_MATRIX *Amat;
	AZ_PRECOND *Pmat = NULL;
	ML *ml;
	FILE *fp;
  int    ch, i;
  struct AZ_SCALING *scaling;
  double solve_time, setup_time, start_time;
  ML_Aggregate *ag;
  int    *ivec;
#ifdef VBR_VERSION
ML_Operator *B, *C, *D;
int *vbr_cnptr, *vbr_rnptr, *vbr_indx, *vbr_bindx, *vbr_bnptr, total_blk_rows;
int total_blk_cols, blk_space, nz_space;
double *vbr_val;
struct ML_CSR_MSRdata *csr_data;
#endif


#ifdef ML_MPI
  MPI_Init(&argc,&argv);

  /* get number of processors and the name of this processor */

  AZ_set_proc_config(proc_config, MPI_COMM_WORLD);
#else
  AZ_set_proc_config(proc_config, AZ_NOT_MPI);
#endif

#ifdef binary
	fp=fopen(".data","rb");
#else
	fp=fopen(".data","r");
#endif
	if (fp==NULL)
		{
			printf("couldn't open file .data\n");
			exit(1);
		}
#ifdef binary
        fread(&leng, sizeof(int), 1, fp);
#else
	fscanf(fp,"%d",&leng);
#endif

	fclose(fp);

	N_grid_pts=leng/num_PDE_eqns;



  /* initialize the list of global indices. NOTE: the list of global */
  /* indices must be in ascending order so that subsequent calls to  */
  /* AZ_find_index() will function properly. */

  AZ_read_update(&N_update, &update, proc_config, N_grid_pts, num_PDE_eqns,
                 AZ_linear);

  AZ_read_msr_matrix(update, &val, &bindx, N_update, proc_config);

  /* This code is to fix things up so that we are sure that all */
  /* blocks (including the ghost nodes) are the same size.      */

  AZ_block_MSR(&bindx, &val, N_update, num_PDE_eqns, update);


  AZ_transform(proc_config, &external, bindx, val,  update, &update_index,
	       &extern_index, &data_org, N_update, 0, 0, 0, &cpntr,
               AZ_MSR_MATRIX);

  Amat = AZ_matrix_create( leng );

#ifndef VBR_VERSION

  AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL);

  Amat->matrix_type  = data_org[AZ_matrix_type];

  data_org[AZ_N_rows]  = data_org[AZ_N_internal] + data_org[AZ_N_border];
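  /* (AZ_N_rows = number of locally owned rows: internal plus border) */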

#else

total_blk_rows = N_update/num_PDE_eqns;
total_blk_cols = total_blk_rows;
blk_space      = total_blk_rows*20;
nz_space       = blk_space*num_PDE_eqns*num_PDE_eqns;
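/* assume at most 20 block entries per block row; nz_space is the  */
/* corresponding number of scalar entries                          */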

vbr_cnptr = (int    *) ML_allocate(sizeof(int   )*(total_blk_cols+1));
vbr_rnptr = (int    *) ML_allocate(sizeof(int   )*(total_blk_cols+1));
vbr_bnptr = (int    *) ML_allocate(sizeof(int   )*(total_blk_cols+2));
vbr_indx  = (int    *) ML_allocate(sizeof(int   )*(blk_space+1));
vbr_bindx = (int    *) ML_allocate(sizeof(int   )*(blk_space+1));
vbr_val   = (double *) ML_allocate(sizeof(double)*(nz_space+1));

for (i = 0; i <= total_blk_cols; i++) vbr_cnptr[i] = num_PDE_eqns;

  AZ_msr2vbr(vbr_val, vbr_indx, vbr_rnptr,  vbr_cnptr, vbr_bnptr,
                vbr_bindx, bindx, val,
                total_blk_rows, total_blk_cols, blk_space,
                nz_space, -1);

  data_org[AZ_N_rows]  = data_org[AZ_N_internal] + data_org[AZ_N_border];
  data_org[AZ_N_int_blk]  = data_org[AZ_N_internal]/num_PDE_eqns;
  data_org[AZ_N_bord_blk] = data_org[AZ_N_bord_blk]/num_PDE_eqns;
  data_org[AZ_N_ext_blk]  = data_org[AZ_N_ext_blk]/num_PDE_eqns;
  data_org[AZ_matrix_type] = AZ_VBR_MATRIX;


  AZ_set_VBR(Amat, vbr_rnptr, vbr_cnptr, vbr_bnptr, vbr_indx, vbr_bindx,
             vbr_val, data_org, 0, NULL, AZ_LOCAL);

  Amat->matrix_type  = data_org[AZ_matrix_type];
#endif

  start_time = AZ_second();

  ML_Create(&ml, N_levels);
  ML_Set_PrintLevel(3);


  /* set up discretization matrix and matrix vector function */

  AZ_ML_Set_Amat(ml, N_levels-1, N_update, N_update, Amat, proc_config);

  ML_Aggregate_Create( &ag );
  ML_Aggregate_Set_Threshold(ag,0.0);
  ML_Set_SpectralNormScheme_PowerMethod(ml);
/*
   To run SA:
     a) set damping factor to 1 and use power method
        ML_Aggregate_Set_DampingFactor(ag, 4./3.);
   To run NSA:
     a) set damping factor to 0
        ML_Aggregate_Set_DampingFactor(ag, 0.);
   To run NSR
     a) set damping factor to 1 and use power method
        ML_Aggregate_Set_DampingFactor(ag, 1.);
        ag->Restriction_smoothagg_transpose = ML_FALSE;
        ag->keep_agg_information=1;
        ag->keep_P_tentative=1;
     b) hack code so it calls the energy minimizing restriction
          line 2973 of ml_agg_genP.c
     c) turn on the NSR flag in ml_agg_energy_min.cpp
   To run Emin
     a) set min_energy = 2 and keep_agg_info = 1;
      ag->minimizing_energy=2;
      ag->keep_agg_information=1;
      ag->cheap_minimizing_energy = 0;
      ag->block_scaled_SA = 1;
*/
  ag->minimizing_energy=2;
  ag->keep_agg_information=1;
  ag->block_scaled_SA = 1;

  ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, num_PDE_eqns, NULL, N_update);
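  /* (a NULL null-space pointer selects the default null space of   */
  /*  num_PDE_eqns piecewise-constant vectors)                      */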
  ML_Aggregate_Set_MaxCoarseSize( ag, 20);

/*
ML_Aggregate_Set_RandomOrdering( ag );
ML_Aggregate_Set_DampingFactor(ag, .1);
ag->drop_tol_for_smoothing = 1.0e-3;
ML_Aggregate_Set_Threshold(ag, 1.0e-3);
ML_Aggregate_Set_MaxCoarseSize( ag, 300);
*/


	coarsest_level = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml, N_levels-1, ML_DECREASING, ag);
	coarsest_level = N_levels - coarsest_level;
	if ( proc_config[AZ_node] == 0 )
		printf("Coarse level = %d \n", coarsest_level);

	/* set up smoothers */

        AZ_defaults(options, params);

	for (level = N_levels-1; level > coarsest_level; level--) {
          /* This is the Aztec domain decomp/ilu smoother that we */
          /* usually use for this problem.                        */

/*
          options[AZ_precond] = AZ_dom_decomp;
          options[AZ_subdomain_solve] = AZ_ilut;
          params[AZ_ilut_fill] = 1.0;
          options[AZ_reorder] = 1;
          ML_Gen_SmootherAztec(ml, level, options, params,
                        proc_config, status, AZ_ONLY_PRECONDITIONER,
                        ML_PRESMOOTHER,NULL);
*/

          /*  Sparse approximate inverse smoother that actually does both */
          /*  pre and post smoothing.                                     */
          /*

          ML_Gen_Smoother_ParaSails(ml , level, ML_PRESMOOTHER, nsmooth,
                                parasails_sym, parasails_thresh,
                                parasails_nlevels, parasails_filter,
                                parasails_loadbal, parasails_factorized);

          parasails_thresh /= 4.;
          */


          /* This is the symmetric Gauss-Seidel smoothing. In parallel,    */
          /* it is not a true Gauss-Seidel in that each processor          */
          /* does a Gauss-Seidel on its local submatrix independent of the */
          /* other processors.                                             */
          /*
	  ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_PRESMOOTHER, nsmooth,1.);
	  ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_POSTSMOOTHER,nsmooth,1.);
          */

          /* Block Gauss-Seidel with block size equal to #DOF per node.    */
          /* Not a true Gauss-Seidel in that each processor does a         */
          /* Gauss-Seidel on its local submatrix independent of the other  */
          /* processors.                                                   */
          /*

	  ML_Gen_Smoother_BlockGaussSeidel(ml,level,ML_PRESMOOTHER,
                                           nsmooth,0.67, num_PDE_eqns);
	  ML_Gen_Smoother_BlockGaussSeidel(ml,level,ML_POSTSMOOTHER,
                                           nsmooth, 0.67, num_PDE_eqns);
          */


  	  ML_Gen_Smoother_SymBlockGaussSeidel(ml,level,ML_POSTSMOOTHER,
                                                1, 1.0, num_PDE_eqns);
	}

        ML_Gen_CoarseSolverSuperLU( ml, coarsest_level);
	ML_Gen_Solver(ml, ML_MGW, N_levels-1, coarsest_level);
	AZ_defaults(options, params);

        options[AZ_solver]   = AZ_gmres;
        options[AZ_scaling]  = AZ_none;
        options[AZ_precond]  = AZ_user_precond;
/*
        options[AZ_conv]     = AZ_r0;
*/
        options[AZ_output]   = 1;
        options[AZ_max_iter] = 1500;
        options[AZ_poly_ord] = 5;
        options[AZ_kspace]   = 130;
        params[AZ_tol]       = 1.0e-8;
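        /* AZ_user_precond: the ML hierarchy built above is attached as    */
        /* the preconditioner via AZ_set_ML_preconditioner() below         */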
/*
options[AZ_precond] = AZ_dom_decomp;
options[AZ_subdomain_solve] = AZ_ilut;
params[AZ_ilut_fill] = 2.0;
*/

	AZ_set_ML_preconditioner(&Pmat, Amat, ml, options);
setup_time = AZ_second() - start_time;

	xxx = (double *) malloc( leng*sizeof(double));
	rhs=(double *)malloc(leng*sizeof(double));

	for (iii = 0; iii < leng; iii++) xxx[iii] = 0.0;

        /* Set rhs */

        fp = fopen("AZ_capture_rhs.mat","r");
        if (fp == NULL) {
           if (proc_config[AZ_node] == 0) printf("taking random vector for rhs\n");
           AZ_random_vector(rhs, data_org, proc_config);
           AZ_reorder_vec(rhs, data_org, update_index, NULL);
        }
        else {
           fclose(fp);
	   ivec =(int *)malloc((leng+1)*sizeof(int));
           AZ_input_msr_matrix("AZ_capture_rhs.mat", update, &rhs, &ivec,
                                N_update, proc_config);
           free(ivec);
           AZ_reorder_vec(rhs, data_org, update_index, NULL);
        }

        /* Set x */

        fp = fopen("AZ_capture_init_guess.mat","r");
        if (fp != NULL) {
           fclose(fp);
	   ivec =(int *)malloc((leng+1)*sizeof(int));
           AZ_input_msr_matrix("AZ_capture_init_guess.mat",update, &xxx, &ivec,
                                N_update, proc_config);
           free(ivec);
           AZ_reorder_vec(xxx, data_org, update_index, NULL);
        }

        /* if Dirichlet BC ... put the answer in */

        for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) {
           if ( (val[i] > .99999999) && (val[i] < 1.0000001))
              xxx[i] = rhs[i];
        }
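        /* if the file AZ_no_multilevel.dat exists, skip the ML preconditioner */
        /* and solve with symmetric diagonal scaling only                      */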

        fp = fopen("AZ_no_multilevel.dat","r");
        scaling = AZ_scaling_create();
start_time = AZ_second();
        if (fp != NULL) {
           fclose(fp);
           options[AZ_precond] = AZ_none;
           options[AZ_scaling] = AZ_sym_diag;
           options[AZ_ignore_scaling] = AZ_TRUE;

           options[AZ_keep_info] = 1;
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling);

/*
           options[AZ_pre_calc] = AZ_reuse;
           options[AZ_conv] = AZ_expected_values;
           if (proc_config[AZ_node] == 0)
              printf("\n-------- Second solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling);
           if (proc_config[AZ_node] == 0)
              printf("\n-------- Third solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling);
*/
        }
        else {
           options[AZ_keep_info] = 1;
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);
           options[AZ_pre_calc] = AZ_reuse;
           options[AZ_conv] = AZ_expected_values;
/*
           if (proc_config[AZ_node] == 0)
              printf("\n-------- Second solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);
           if (proc_config[AZ_node] == 0)
              printf("\n-------- Third solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);
*/
        }
   solve_time = AZ_second() - start_time;

   if (proc_config[AZ_node] == 0)
      printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time);

   ML_Aggregate_Destroy(&ag);
   ML_Destroy(&ml);
   AZ_free((void *) Amat->data_org);
   AZ_free((void *) Amat->val);
   AZ_free((void *) Amat->bindx);
   AZ_free((void *) update);
   AZ_free((void *) external);
   AZ_free((void *) extern_index);
   AZ_free((void *) update_index);
   AZ_scaling_destroy(&scaling);
   if (Amat  != NULL) AZ_matrix_destroy(&Amat);
   if (Pmat  != NULL) AZ_precond_destroy(&Pmat);
   free(xxx);
   free(rhs);


#ifdef ML_MPI
  MPI_Finalize();
#endif

  return 0;

}
Example #5
int main(int argc, char *argv[])
{
	int num_PDE_eqns=3, N_levels=3, nsmooth=1;

	int    leng, level, N_grid_pts, coarsest_level;

  /* See Aztec User's Guide for more information on the */
  /* variables that follow.                             */

  int    proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE];
  double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE];

  /* data structure for matrix corresponding to the fine grid */

  int    *data_org = NULL, *update = NULL, *external = NULL;
  int    *update_index = NULL, *extern_index = NULL;
  int    *cpntr = NULL;
  int    *bindx = NULL, N_update, iii;
  double *val = NULL;
	double *xxx, *rhs;

	AZ_MATRIX *Amat;
	AZ_PRECOND *Pmat = NULL;
	ML *ml;
	FILE *fp;
  int    ch, i, j, Nrigid, *garbage;
  struct AZ_SCALING *scaling;
  double solve_time, setup_time, start_time, *mode, *rigid;
  ML_Aggregate *ag;
  int    nblocks, *blocks;
  char   filename[80];
  double alpha;
  int    one = 1;


#ifdef ML_MPI
  MPI_Init(&argc,&argv);

  /* get number of processors and the name of this processor */

  AZ_set_proc_config(proc_config, MPI_COMM_WORLD);
#else
  AZ_set_proc_config(proc_config, AZ_NOT_MPI);
#endif

leng = 0;
if (proc_config[AZ_node] == 0) {
#ifdef binary
	fp=fopen(".data","rb");
#else
	fp=fopen(".data","r");
#endif
	if (fp==NULL)
		{
			printf("couldn't open file .data\n");
			exit(1);
		}
#ifdef binary
        fread(&leng, sizeof(int), 1, fp);
#else
	fscanf(fp,"%d",&leng);
#endif

	fclose(fp);
}
leng = AZ_gsum_int(leng, proc_config);
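/* leng was read only on processor 0 (it is 0 elsewhere), so the global */
/* sum distributes the value to every processor                         */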

	N_grid_pts=leng/num_PDE_eqns;



  /* initialize the list of global indices. NOTE: the list of global */
  /* indices must be in ascending order so that subsequent calls to  */
  /* AZ_find_index() will function properly. */
	
  AZ_read_update(&N_update, &update, proc_config, N_grid_pts, num_PDE_eqns,
                 AZ_linear);
	
	
  AZ_read_msr_matrix(update, &val, &bindx, N_update, proc_config);

  AZ_transform(proc_config, &external, bindx, val,  update, &update_index,
	       &extern_index, &data_org, N_update, 0, 0, 0, &cpntr, 
               AZ_MSR_MATRIX);
	
  Amat = AZ_matrix_create( leng );
  AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL);

  Amat->matrix_type  = data_org[AZ_matrix_type];
	
  data_org[AZ_N_rows]  = data_org[AZ_N_internal] + data_org[AZ_N_border];
			
  start_time = AZ_second();

AZ_defaults(options, params);
/*
scaling = AZ_scaling_create();
xxx = (double *) calloc( leng,sizeof(double));
rhs=(double *)calloc(leng,sizeof(double));
options[AZ_scaling] = AZ_sym_diag;
options[AZ_precond] = AZ_none;
options[AZ_max_iter] = 30;
options[AZ_keep_info] = 1;
AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); 
don't forget vector rescaling ...
free(xxx);
free(rhs);
*/
options[AZ_scaling] = AZ_none;
	



  ML_Create(&ml, N_levels);
			
			
  /* set up discretization matrix and matrix vector function */
	
  AZ_ML_Set_Amat(ml, N_levels-1, N_update, N_update, Amat, proc_config);
	
  ML_Aggregate_Create( &ag );

  Nrigid = 0;
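  /* count how many rigid_body_mode<k> files are present; processor 0 */
  /* probes for the files and the count is summed over all processors */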
if (proc_config[AZ_node] == 0) {
  sprintf(filename,"rigid_body_mode%d",Nrigid+1);
  while( (fp = fopen(filename,"r")) != NULL) {
     fclose(fp);
     Nrigid++;
     sprintf(filename,"rigid_body_mode%d",Nrigid+1);
  }
}
Nrigid = AZ_gsum_int(Nrigid,proc_config);

  if (Nrigid != 0) {
     rigid = (double *) ML_allocate( sizeof(double)*Nrigid*(N_update+1) );
     if (rigid == NULL) {
        printf("Error: Not enough space for rigid body modes\n");
     }
  }

rhs=(double *)malloc(leng*sizeof(double));
AZ_random_vector(rhs, data_org, proc_config);
  
  for (i = 0; i < Nrigid; i++) {
     sprintf(filename,"rigid_body_mode%d",i+1);
     AZ_input_msr_matrix(filename, update, &mode, &garbage, 
                         N_update, proc_config);


/*
AZ_sym_rescale_sl(mode, Amat->data_org, options, proc_config, scaling);
*/
/*
Amat->matvec(mode, rigid, Amat, proc_config);
for (j = 0; j < N_update; j++) printf("this is %d %e\n",j,rigid[j]);
*/
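     /* Gram-Schmidt: orthogonalize the new mode against the previously */
     /* read modes, then project the mode component out of the rhs      */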
     for (j = 0; j < i; j++) {
        alpha = -AZ_gdot(N_update, mode, &(rigid[j*N_update]), proc_config) /
                 AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]),
                         proc_config);
        daxpy_(&N_update, &alpha, &(rigid[j*N_update]), &one, mode, &one);
        printf("alpha1 is %e\n", alpha);
     }
     alpha = -AZ_gdot(N_update, mode, rhs, proc_config) /
              AZ_gdot(N_update, mode, mode, proc_config);
     printf("alpha2 is %e\n", alpha);
     daxpy_(&N_update, &alpha, mode, &one, rhs, &one);

  
     for (j = 0; j < N_update; j++) rigid[i*N_update+j] = mode[j];
     free(mode);
     free(garbage);
  }
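  /* remove any remaining rigid body mode components from the rhs */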
  for (j = 0; j < Nrigid; j++) {
     alpha = -AZ_gdot(N_update, rhs, &(rigid[j*N_update]), proc_config) /
              AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]),
                      proc_config);
     daxpy_(&N_update, &alpha, &(rigid[j*N_update]), &one, rhs, &one);
     printf("alpha4 is %e\n", alpha);
  }


  for (i = 0; i < Nrigid; i++) {
     alpha = -AZ_gdot(N_update, &(rigid[i*N_update]), rhs, proc_config);
     printf("alpha is %e\n", alpha);
  }
  if (Nrigid != 0) {
     ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, Nrigid, rigid, N_update);
/*
     free(rigid);
*/
  }

	coarsest_level = ML_Gen_MGHierarchy_UsingAggregation(ml, N_levels-1, ML_DECREASING, ag);
	coarsest_level = N_levels - coarsest_level;
/*
ML_Operator_Print(&(ml->Pmat[N_levels-2]), "Pmat");
exit(1);
*/

	if ( proc_config[AZ_node] == 0 )
		printf("Coarse level = %d \n", coarsest_level);
	
	/* set up smoothers */
	
	for (level = N_levels-1; level > coarsest_level; level--) {
          j = 10;
          if (level == N_levels-1) j = 10;
          options[AZ_solver]  = AZ_cg;
          options[AZ_precond] = AZ_sym_GS;
          options[AZ_subdomain_solve] = AZ_icc;
/*
          options[AZ_precond] = AZ_none;
*/
          options[AZ_poly_ord] = 5;
          ML_Gen_SmootherAztec(ml, level, options, params, proc_config, status,
                               j, ML_PRESMOOTHER, NULL);
          ML_Gen_SmootherAztec(ml, level, options, params, proc_config, status,
                               j, ML_POSTSMOOTHER, NULL);
/*
		ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth,1.0);
		ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth,1.0);
*/
/*
                nblocks = ML_Aggregate_Get_AggrCount( ag, level );
                ML_Aggregate_Get_AggrMap( ag, level, &blocks);
                ML_Gen_Smoother_VBlockSymGaussSeidel( ml , level, ML_BOTH, nsmooth, 1.0,
                                                 nblocks, blocks);
                ML_Gen_Smoother_VBlockSymGaussSeidel( ml , level, ML_POSTSMOOTHER, nsmooth, 1.0, 
                                                 nblocks, blocks);
*/
/*
                ML_Gen_Smoother_VBlockJacobi( ml , level, ML_PRESMOOTHER, nsmooth, .5,
                                                 nblocks, blocks);
                ML_Gen_Smoother_VBlockJacobi( ml , level, ML_POSTSMOOTHER, nsmooth,.5,
                                                 nblocks, blocks);
*/
/*
		ML_Gen_Smoother_GaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth);
		ML_Gen_Smoother_GaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth);    
*/
/* 
need to change this when num_pdes is different on different levels
*/
/*
if (level == N_levels-1) {
		ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth, 0.5, num_PDE_eqns);
		ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth, 0.5, num_PDE_eqns);
}
else {
		ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth, 0.5, 2*num_PDE_eqns);
		ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth, 0.5, 2*num_PDE_eqns);
}
*/
/*
*/

/*
			ML_Gen_SmootherJacobi(ml , level, ML_PRESMOOTHER, nsmooth, .67);
			ML_Gen_SmootherJacobi(ml , level, ML_POSTSMOOTHER, nsmooth, .67 );
*/
		
		
	}
	
/*
	ML_Gen_CoarseSolverSuperLU( ml, coarsest_level);
*/
/*
ML_Gen_SmootherSymGaussSeidel(ml , coarsest_level, ML_PRESMOOTHER, 2*nsmooth,1.);
*/
/*
ML_Gen_SmootherBlockGaussSeidel(ml , level, ML_PRESMOOTHER, 50*nsmooth, 1.0, 2*num_PDE_eqns);
*/
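/* coarse grid smoother: block Gauss-Seidel sweeps on the coarsest level */
/* (after the loop above, level == coarsest_level)                       */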
ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_PRESMOOTHER, 2*nsmooth, 1.0, num_PDE_eqns);
		
	
	ML_Gen_Solver(ml, ML_MGV, N_levels-1, coarsest_level); 
	AZ_defaults(options, params);
	
        options[AZ_solver]   = AZ_GMRESR;
        options[AZ_scaling]  = AZ_none;
        options[AZ_precond]  = AZ_user_precond;
        options[AZ_conv]     = AZ_rhs;
        options[AZ_output]   = 1;
        options[AZ_max_iter] = 1500;
        options[AZ_poly_ord] = 5;
        options[AZ_kspace]   = 130;
        params[AZ_tol]       = 1.0e-8;
	
	AZ_set_ML_preconditioner(&Pmat, Amat, ml, options); 
setup_time = AZ_second() - start_time;
	
	xxx = (double *) malloc( leng*sizeof(double));

	
        /* Set rhs */
 
        fp = fopen("AZ_capture_rhs.dat","r");
        if (fp == NULL) {
           if (proc_config[AZ_node] == 0) printf("taking random vector for rhs\n");
/*
           AZ_random_vector(rhs, data_org, proc_config);
           AZ_reorder_vec(rhs, data_org, update_index, NULL);
           AZ_random_vector(xxx, data_org, proc_config);
           AZ_reorder_vec(xxx, data_org, update_index, NULL);
           Amat->matvec(xxx, rhs, Amat, proc_config);
*/
        }
        else {
           ch = getc(fp);
           if (ch == 'S') {
              while ( (ch = getc(fp)) != '\n') ;
           }
           else ungetc(ch,fp);
           for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) 
              fscanf(fp,"%lf",&(rhs[i]));
           fclose(fp);
        }
	for (iii = 0; iii < leng; iii++) xxx[iii] = 0.0; 

        /* Set x */

        fp = fopen("AZ_capture_init_guess.dat","r");
        if (fp != NULL) {
           ch = getc(fp);
           if (ch == 'S') {
              while ( (ch = getc(fp)) != '\n') ;
           }
           else ungetc(ch,fp);
           for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++)
              fscanf(fp,"%lf",&(xxx[i]));
           fclose(fp);
           options[AZ_conv] = AZ_expected_values;
        }

        /* if Dirichlet BC ... put the answer in */

        for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) {
           if ( (val[i] > .99999999) && (val[i] < 1.0000001))
              xxx[i] = rhs[i];      
        }

        fp = fopen("AZ_no_multilevel.dat","r");
        scaling = AZ_scaling_create();
start_time = AZ_second();
        if (fp != NULL) {
           fclose(fp);
           options[AZ_precond] = AZ_none;
           options[AZ_scaling] = AZ_sym_diag;
           options[AZ_ignore_scaling] = AZ_TRUE;

           options[AZ_keep_info] = 1;
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); 

/*
           options[AZ_pre_calc] = AZ_reuse;
           options[AZ_conv] = AZ_expected_values;
           if (proc_config[AZ_node] == 0) 
              printf("\n-------- Second solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); 
           if (proc_config[AZ_node] == 0) 
              printf("\n-------- Third solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); 
*/
        }
        else {
           options[AZ_keep_info] = 1;
/*
options[AZ_max_iter] = 40;
*/
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); 
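           /* project the rigid body modes out of the computed solution */
           /* before the second solve                                   */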
           for (j = 0; j < Nrigid; j++) {
              alpha = -AZ_gdot(N_update, xxx, &(rigid[j*N_update]), proc_config) /
                       AZ_gdot(N_update, &(rigid[j*N_update]),
                               &(rigid[j*N_update]), proc_config);
              daxpy_(&N_update, &alpha, &(rigid[j*N_update]), &one, xxx, &one);
              printf("alpha5 is %e\n", alpha);
           }
AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); 
           options[AZ_pre_calc] = AZ_reuse;
           options[AZ_conv] = AZ_expected_values;
/*
           if (proc_config[AZ_node] == 0) 
              printf("\n-------- Second solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); 
           if (proc_config[AZ_node] == 0) 
              printf("\n-------- Third solve with improved convergence test -----\n");
           AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); 
*/
        }
   solve_time = AZ_second() - start_time;

   if (proc_config[AZ_node] == 0) 
      printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time);

   ML_Aggregate_Destroy(&ag);
   ML_Destroy(&ml);
   AZ_free((void *) Amat->data_org);
   AZ_free((void *) Amat->val);
   AZ_free((void *) Amat->bindx);
   AZ_free((void *) update);
   AZ_free((void *) external);
   AZ_free((void *) extern_index);
   AZ_free((void *) update_index);
   if (Amat  != NULL) AZ_matrix_destroy(&Amat);
   if (Pmat  != NULL) AZ_precond_destroy(&Pmat);
   free(xxx);
   free(rhs);


#ifdef ML_MPI
  MPI_Finalize();
#endif
	
  return 0;
	
}
Example #6
int main(int argc, char *argv[])
{
	int num_PDE_eqns=6, N_levels=4, nsmooth=2;

	int    leng, level, N_grid_pts, coarsest_level;

  /* See Aztec User's Guide for more information on the */
  /* variables that follow.                             */

  int    proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE];
  double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE];

  /* data structure for matrix corresponding to the fine grid */

  double *val = NULL, *xxx, *rhs, solve_time, setup_time, start_time;
  AZ_MATRIX *Amat;
  AZ_PRECOND *Pmat = NULL;
  ML *ml;
  FILE *fp;
  int i, j, Nrigid, *garbage = NULL;
  int    *data_org = NULL, *update = NULL, *external = NULL;
  int    *update_index = NULL, *extern_index = NULL;
  int    *cpntr = NULL, *bindx = NULL, N_update, iii;
  int    which_filename = 0;
#ifdef ML_partition
  int nblocks;
  int *block_list = NULL;
  int k;
#endif
#ifdef SCALE_ME
  double *scaling_vect = NULL;
#endif
  struct AZ_SCALING *scaling;
  ML_Aggregate *ag;
  double *mode, *rigid;
  char   filename[80];
  double alpha;
  int    allocated = 0;
  int    old_prec, old_sol;
  double old_tol;
/*
  double *Amode, beta, biggest;
  int    big_ind = -1, ii;
*/
  ML_Operator *Amatrix;
  int    *rowi_col = NULL, rowi_N, count2, ccc;
  double *rowi_val = NULL;
  double max_diag, min_diag, max_sum, sum;
  int    nBlocks, *blockIndices, Ndof;
#ifdef ML_partition
   FILE *fp2;
   int count;

   if (argc != 2) {
     printf("Usage: ml_read_elas num_processors\n");
     exit(1);
   }
   else sscanf(argv[1],"%d",&nblocks);
#endif

#ifdef HAVE_MPI
  MPI_Init(&argc,&argv);
  /* get number of processors and the name of this processor */

  AZ_set_proc_config(proc_config, MPI_COMM_WORLD);
#else
  AZ_set_proc_config(proc_config, AZ_NOT_MPI);
#endif

  /* read in the number of matrix equations */
  leng = 0;
  if (proc_config[AZ_node] == 0) {
#    ifdef binary
	fp=fopen(".data","rb");
#    else
	fp=fopen(".data","r");
#    endif
     if (fp==NULL) {
        printf("couldn't open file .data\n");
        exit(1);
     }
#    ifdef binary
        fread(&leng, sizeof(int), 1, fp);
#    else
        fscanf(fp,"%d",&leng);
#    endif
     fclose(fp);
  }
  leng = AZ_gsum_int(leng, proc_config);

  N_grid_pts=leng/num_PDE_eqns;

  /* initialize the list of global indices. NOTE: the list of global */
  /* indices must be in ascending order so that subsequent calls to  */
  /* AZ_find_index() will function properly. */

  if (proc_config[AZ_N_procs] == 1) i = AZ_linear;
  else i = AZ_file;
  AZ_read_update(&N_update, &update, proc_config, N_grid_pts, num_PDE_eqns,i);

  AZ_read_msr_matrix(update, &val, &bindx, N_update, proc_config);


  /* This code is to fix things up so that we are sure that all */
  /* blocks (including the ghost nodes) are the same size.      */

  AZ_block_MSR(&bindx, &val, N_update, num_PDE_eqns, update);

  AZ_transform_norowreordering(proc_config, &external, bindx, val,  update, &update_index,
	       &extern_index, &data_org, N_update, 0, 0, 0, &cpntr,
	       AZ_MSR_MATRIX);

  Amat = AZ_matrix_create( leng );
  AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL);

  Amat->matrix_type  = data_org[AZ_matrix_type];

  data_org[AZ_N_rows]  = data_org[AZ_N_internal] + data_org[AZ_N_border];

#ifdef SCALE_ME
  ML_MSR_sym_diagonal_scaling(Amat, proc_config, &scaling_vect);
#endif

  start_time = AZ_second();

  options[AZ_scaling] = AZ_none;
  ML_Create(&ml, N_levels);
  ML_Set_PrintLevel(10);


  /* set up discretization matrix and matrix vector function */

  AZ_ML_Set_Amat(ml, N_levels-1, N_update, N_update, Amat, proc_config);

#ifdef ML_partition

  /* this code is meant to partition the matrices so that things can be */
  /* run in parallel later.                                             */
  /* It is meant to be run on only one processor.                       */
#ifdef	MB_MODIF
  fp2 = fopen(".update","w");
#else
  fp2 = fopen("partition_file","w");
#endif

  ML_Operator_AmalgamateAndDropWeak(&(ml->Amat[N_levels-1]), num_PDE_eqns, 0.0);
  ML_Gen_Blocks_Metis(ml, N_levels-1, &nblocks, &block_list);

  for (i = 0; i < nblocks; i++) {
     count = 0;
     for (j = 0; j < ml->Amat[N_levels-1].outvec_leng; j++) {
        if (block_list[j] == i) count++;
     }
     fprintf(fp2,"   %d\n",count*num_PDE_eqns);
     for (j = 0; j < ml->Amat[N_levels-1].outvec_leng; j++) {
        if (block_list[j] == i) {
           for (k = 0; k < num_PDE_eqns; k++)  fprintf(fp2,"%d\n",j*num_PDE_eqns+k);
        }
     }
  }
  fclose(fp2);
  ML_Operator_UnAmalgamateAndDropWeak(&(ml->Amat[N_levels-1]),num_PDE_eqns,0.0);
#ifdef	MB_MODIF
  printf(" partition file dumped in .update\n");
#endif
  exit(1);
#endif

  ML_Aggregate_Create( &ag );
/*
  ML_Aggregate_Set_CoarsenScheme_MIS(ag);
*/
#ifdef MB_MODIF
  ML_Aggregate_Set_DampingFactor(ag,1.50);
#else
  ML_Aggregate_Set_DampingFactor(ag,1.5);
#endif
  ML_Aggregate_Set_CoarsenScheme_METIS(ag);
  ML_Aggregate_Set_NodesPerAggr( ml, ag, -1, 35);
  /*
  ML_Aggregate_Set_Phase3AggregateCreationAggressiveness(ag, 10.001);
  */


  ML_Aggregate_Set_Threshold(ag, 0.0);
  ML_Aggregate_Set_MaxCoarseSize( ag, 300);


  /* read in the rigid body modes */

   Nrigid = 0;

  /* to ensure compatibility with RBM dumping software */
   if (proc_config[AZ_node] == 0) {

      sprintf(filename,"rigid_body_mode%02d",Nrigid+1);
      while( (fp = fopen(filename,"r")) != NULL) {
	which_filename = 1;
          fclose(fp);
          Nrigid++;
          sprintf(filename,"rigid_body_mode%02d",Nrigid+1);
      }
      sprintf(filename,"rigid_body_mode%d",Nrigid+1);
      while( (fp = fopen(filename,"r")) != NULL) {
          fclose(fp);
          Nrigid++;
          sprintf(filename,"rigid_body_mode%d",Nrigid+1);
      }
    }

    Nrigid = AZ_gsum_int(Nrigid,proc_config);

    if (Nrigid != 0) {
       rigid = (double *) ML_allocate( sizeof(double)*Nrigid*(N_update+1) );
       if (rigid == NULL) {
          printf("Error: Not enough space for rigid body modes\n");
       }
    }

    rhs   = (double *) malloc(leng*sizeof(double));
    xxx   = (double *) malloc(leng*sizeof(double));

    for (iii = 0; iii < leng; iii++) xxx[iii] = 0.0;



    for (i = 0; i < Nrigid; i++) {
       if (which_filename == 1) sprintf(filename,"rigid_body_mode%02d",i+1);
       else sprintf(filename,"rigid_body_mode%d",i+1);
       AZ_input_msr_matrix(filename,update,&mode,&garbage,N_update,proc_config);
       AZ_reorder_vec(mode, data_org, update_index, NULL);
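       /* permute the mode into the local Aztec ordering so that it is */
       /* consistent with the transformed matrix                       */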
       /* here is something to stick a rigid body mode as the initial */
       /* The idea is to solve A x = 0 without smoothing with a two   */
       /* level method. If everything is done properly, we should     */
       /* converge in 2 iterations.                                   */
       /* Note: we must also zero out components of the rigid body    */
       /* mode that correspond to Dirichlet bcs.                      */

       if (i == -4) {
          for (iii = 0; iii < leng; iii++) xxx[iii] = mode[iii];

          ccc = 0;
          Amatrix = &(ml->Amat[N_levels-1]);
          for (iii = 0; iii < Amatrix->outvec_leng; iii++) {
             ML_get_matrix_row(Amatrix,1,&iii,&allocated,&rowi_col,&rowi_val,
                               &rowi_N, 0);
             count2 = 0;
             for (j = 0; j < rowi_N; j++) if (rowi_val[j] != 0.) count2++;
             if (count2 <= 1) { xxx[iii] = 0.; ccc++; }
          }
          free(rowi_col); free(rowi_val);
          allocated = 0; rowi_col = NULL; rowi_val = NULL;
       }

       /*
        *  Rescale matrix/rigid body modes and checking
        *
        AZ_sym_rescale_sl(mode, Amat->data_org, options, proc_config, scaling);
        Amat->matvec(mode, rigid, Amat, proc_config);
        for (j = 0; j < N_update; j++) printf("this is %d %e\n",j,rigid[j]);
        */

        /* Here is some code to check that the rigid body modes are  */
        /* really rigid body modes. The idea is to multiply by A and */
        /* then to zero out things that we "think" are boundaries.   */
        /* In this hardwired example, things near boundaries         */
        /* correspond to matrix rows that do not have 81 nonzeros.   */
        /*

        Amode = (double *) malloc(leng*sizeof(double));
        Amat->matvec(mode, Amode, Amat, proc_config);
        j = 0;
        biggest = 0.0;
        for (ii = 0; ii < N_update; ii++) {
           if ( Amat->bindx[ii+1] - Amat->bindx[ii] != 80) {
              Amode[ii] = 0.; j++;
           }
           else {
              if ( fabs(Amode[ii]) > biggest) {
                 biggest=fabs(Amode[ii]); big_ind = ii;
              }
           }
        }
        printf("%d entries zeroed out of %d elements\n",j,N_update);
        alpha = AZ_gdot(N_update, Amode, Amode, proc_config);
        beta  = AZ_gdot(N_update,  mode,  mode, proc_config);
        printf("||A r||^2 =%e, ||r||^2 = %e, ratio = %e\n",
               alpha,beta,alpha/beta);
        printf("the biggest is %e at row %d\n",biggest,big_ind);
        free(Amode);

        */

        /* orthogonalize mode with respect to previous modes. */

        for (j = 0; j < i; j++) {
           alpha = -AZ_gdot(N_update, mode, &(rigid[j*N_update]), proc_config)/
                    AZ_gdot(N_update, &(rigid[j*N_update]),
                               &(rigid[j*N_update]), proc_config);
	   /*           daxpy_(&N_update,&alpha,&(rigid[j*N_update]),  &one, mode, &one); */
        }
#ifndef	MB_MODIF
       printf(" after mb %e %e %e\n",mode[0],mode[1],mode[2]);
#endif

        for (j = 0; j < N_update; j++) rigid[i*N_update+j] = mode[j];
        free(mode);
        free(garbage); garbage = NULL;

    }

    if (Nrigid != 0) {
             ML_Aggregate_Set_BlockDiagScaling(ag);
       ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, Nrigid, rigid, N_update);
       free(rigid);
    }
#ifdef SCALE_ME
    ML_Aggregate_Scale_NullSpace(ag, scaling_vect, N_update);
#endif

    coarsest_level = ML_Gen_MGHierarchy_UsingAggregation(ml, N_levels-1,
				ML_DECREASING, ag);
   AZ_defaults(options, params);
   coarsest_level = N_levels - coarsest_level;
   if ( proc_config[AZ_node] == 0 )
	printf("Coarse level = %d \n", coarsest_level);

   /* set up smoothers */

   for (level = N_levels-1; level > coarsest_level; level--) {

/*
      ML_Gen_Smoother_BlockGaussSeidel(ml, level,ML_BOTH, 1, 1., num_PDE_eqns);
*/

    /*  Sparse approximate inverse smoother that actually does both */
    /*  pre and post smoothing.                                     */
    /*
      ML_Gen_Smoother_ParaSails(ml , level, ML_PRESMOOTHER, nsmooth,
                                parasails_sym, parasails_thresh,
                                parasails_nlevels, parasails_filter,
                                parasails_loadbal, parasails_factorized);
     */

     /* This is the symmetric Gauss-Seidel smoothing that we usually use. */
     /* In parallel, it is not a true Gauss-Seidel in that each processor */
     /* does a Gauss-Seidel on its local submatrix independent of the     */
     /* other processors.                                                 */

     /* ML_Gen_Smoother_Cheby(ml, level, ML_BOTH, 30., nsmooth); */
     Ndof = ml->Amat[level].invec_leng;
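     /* the aggregates on this level supply the blocks for the block- */
     /* diagonally scaled Chebyshev smoother generated below          */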

     ML_Gen_Blocks_Aggregates(ag, level, &nBlocks, &blockIndices);

     ML_Gen_Smoother_BlockDiagScaledCheby(ml, level, ML_BOTH, 30.,nsmooth,
					  nBlocks, blockIndices);
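     /* As we read the ML interface, the 30. above is the eigenvalue ratio  */
     /* targeted by the Chebyshev polynomial, nsmooth is its degree, and    */
     /* the block-diagonal scaling uses the aggregate blocks just computed. */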

     /*
      ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_BOTH, nsmooth,1.);
     */


      /* This is a true Gauss-Seidel in parallel. This seems to work for */
      /* elasticity problems.  However, I don't believe that this is very */
      /* efficient in parallel.                                           */
     /*
      nblocks = ml->Amat[level].invec_leng/num_PDE_eqns;
      blocks = (int *) ML_allocate(sizeof(int)*N_update);
      for (i =0; i < ml->Amat[level].invec_leng; i++)
         blocks[i] = i/num_PDE_eqns;

      ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml , level, ML_PRESMOOTHER,
                                                  nsmooth, 1., nblocks, blocks);
      ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml, level, ML_POSTSMOOTHER,
                                                  nsmooth, 1., nblocks, blocks);
      free(blocks);
*/

      /* Block Jacobi Smoothing */
      /*
      nblocks = ml->Amat[level].invec_leng/num_PDE_eqns;
      blocks = (int *) ML_allocate(sizeof(int)*N_update);
      for (i =0; i < ml->Amat[level].invec_leng; i++)
         blocks[i] = i/num_PDE_eqns;

      ML_Gen_Smoother_VBlockJacobi(ml , level, ML_BOTH, nsmooth,
                                   ML_ONE_STEP_CG, nblocks, blocks);
      free(blocks);
      */

      /* Jacobi Smoothing                                                 */
     /*

      ML_Gen_Smoother_Jacobi(ml , level, ML_PRESMOOTHER, nsmooth, ML_ONE_STEP_CG);
      ML_Gen_Smoother_Jacobi(ml , level, ML_POSTSMOOTHER, nsmooth,ML_ONE_STEP_CG);
     */



      /*  This does a block Gauss-Seidel (not true GS in parallel)        */
      /*  where each processor has 'nblocks' blocks.                      */
      /*
      nblocks = 250;
      ML_Gen_Blocks_Metis(ml, level, &nblocks, &blocks);
      ML_Gen_Smoother_VBlockJacobi(ml , level, ML_BOTH, nsmooth,ML_ONE_STEP_CG,
                                        nblocks, blocks);
      free(blocks);
      */
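      /* Presumably the coarser levels carry 6 degrees of freedom per node, */
      /* one per rigid body mode kept in the null space, hence num_PDE_eqns */
      /* is reset to 6 for the remaining (coarser) levels below.            */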
      num_PDE_eqns = 6;
   }
   /* Choose coarse grid solver: mls, superlu, symGS, or Aztec */

   /*
   ML_Gen_Smoother_Cheby(ml, coarsest_level, ML_BOTH, 30., nsmooth);
   ML_Gen_CoarseSolverSuperLU( ml, coarsest_level);
   */
   /*
   ML_Gen_Smoother_SymGaussSeidel(ml , coarsest_level, ML_BOTH, nsmooth,1.);
   */

   old_prec = options[AZ_precond];
   old_sol  = options[AZ_solver];
   old_tol  = params[AZ_tol];
   /* params[AZ_tol] = 1.0e-9; */
   params[AZ_tol] = 1.0e-5;
   options[AZ_precond] = AZ_Jacobi;
   options[AZ_solver]  = AZ_cg;
   options[AZ_poly_ord] = 1;
   options[AZ_conv] = AZ_r0;
   options[AZ_orth_kvecs] = AZ_TRUE;

   j = AZ_gsum_int(ml->Amat[coarsest_level].outvec_leng, proc_config);

   options[AZ_keep_kvecs] = j - 6;
   options[AZ_max_iter] =  options[AZ_keep_kvecs];

   ML_Gen_SmootherAztec(ml, coarsest_level, options, params,
            proc_config, status, options[AZ_keep_kvecs], ML_PRESMOOTHER, NULL);
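   /* The coarsest grid is handled by an Aztec CG/Jacobi solve installed as */
   /* an ML smoother; it keeps up to (global coarse size - 6) Krylov        */
   /* vectors, the "- 6" presumably discounting the 6 rigid body modes in   */
   /* the near-null space of the coarse operator.                           */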

   options[AZ_conv] = AZ_noscaled;
   options[AZ_keep_kvecs] = 0;
   options[AZ_orth_kvecs] = 0;
   options[AZ_precond] = old_prec;
   options[AZ_solver] = old_sol;
   params[AZ_tol] = old_tol;



#ifdef RST_MODIF
   ML_Gen_Solver(ml, ML_MGV, N_levels-1, coarsest_level);
#else
#ifdef	MB_MODIF
   ML_Gen_Solver(ml, ML_SAAMG,   N_levels-1, coarsest_level);
#else
   ML_Gen_Solver(ml, ML_MGFULLV, N_levels-1, coarsest_level);
#endif
#endif

   /* options[AZ_solver] = AZ_GMRESR; */
   options[AZ_solver]   = AZ_cg;
   options[AZ_scaling]  = AZ_none;
   options[AZ_precond]  = AZ_user_precond;
   /* options[AZ_conv]  = AZ_r0; */
   options[AZ_conv]     = AZ_noscaled;
   options[AZ_output]   = 1;
   options[AZ_max_iter] = 500;
   options[AZ_poly_ord] = 5;
   options[AZ_kspace]   = 40;
   params[AZ_tol]       = 4.8e-6;

   AZ_set_ML_preconditioner(&Pmat, Amat, ml, options);
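   /* AZ_set_ML_preconditioner() wraps the multigrid hierarchy built above  */
   /* as the user-defined preconditioner (AZ_user_precond) applied by the   */
   /* outer Aztec CG iteration below.                                       */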
   setup_time = AZ_second() - start_time;

   /* Set rhs */

   fp = fopen("AZ_capture_rhs.dat","r");
   if (fp == NULL) {
      AZ_random_vector(rhs, data_org, proc_config);
      if (proc_config[AZ_node] == 0) printf("taking random vector for rhs\n");
      for (i = 0; i < -N_update; i++) {
        rhs[i] = (double) update[i]; rhs[i] = 7.;
      }
   }
   else {
      if (proc_config[AZ_node]== 0) printf("reading rhs guess from file\n");
      AZ_input_msr_matrix("AZ_capture_rhs.dat", update, &rhs, &garbage,
			  N_update, proc_config);
      free(garbage);
   }
   AZ_reorder_vec(rhs, data_org, update_index, NULL);

   printf("changing rhs by multiplying with A\n");
  Amat->matvec(rhs, xxx, Amat, proc_config);
  for (i = 0; i < N_update; i++) rhs[i] = xxx[i];
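   /* Note: the rhs has just been replaced by A*rhs, so the right-hand side */
   /* now lies in the range of A; presumably this keeps (near) null-space   */
   /* rigid body components out of the right-hand side.                     */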

   fp = fopen("AZ_capture_init_guess.dat","r");
   if (fp != NULL) {
      fclose(fp);
      if (proc_config[AZ_node]== 0) printf("reading initial guess from file\n");
      AZ_input_msr_matrix("AZ_capture_init_guess.dat", update, &xxx, &garbage,
      			  N_update, proc_config);
      free(garbage);


      xxx = (double *) realloc(xxx, sizeof(double)*(
					 Amat->data_org[AZ_N_internal]+
					 Amat->data_org[AZ_N_border] +
					 Amat->data_org[AZ_N_external]));
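      /* The resize above makes room for the external (ghost) entries,     */
      /* data_org[AZ_N_external] of them, which later matrix-vector        */
      /* products involving xxx are expected to reference.                 */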
   }
   AZ_reorder_vec(xxx, data_org, update_index, NULL);

   /* if Dirichlet BC ... put the answer in */

/*
   for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) {
      if ( (val[i] > .99999999) && (val[i] < 1.0000001))
         xxx[i] = rhs[i];
   }
*/

   fp = fopen("AZ_no_multilevel.dat","r");
   scaling = AZ_scaling_create();
   start_time = AZ_second();


   if (fp != NULL) {
      fclose(fp);
      options[AZ_precond] = AZ_none;
      options[AZ_scaling] = AZ_sym_diag;
      options[AZ_ignore_scaling] = AZ_TRUE;

      options[AZ_keep_info] = 1;
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling);

/*
      options[AZ_pre_calc] = AZ_reuse;
      options[AZ_conv] = AZ_expected_values;
      if (proc_config[AZ_node] == 0)
              printf("\n-------- Second solve with improved convergence test -----\n");
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling);
      if (proc_config[AZ_node] == 0)
              printf("\n-------- Third solve with improved convergence test -----\n");
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling);
*/
   }
   else {
      options[AZ_keep_info] = 1;
      /* options[AZ_conv] = AZ_noscaled; */
      options[AZ_conv] = AZ_r0;
      params[AZ_tol] = 1.0e-7;
      /* ML_Iterate(ml, xxx, rhs); */
      alpha = sqrt(AZ_gdot(N_update, xxx, xxx, proc_config));
      printf("init guess = %e\n",alpha);
      alpha = sqrt(AZ_gdot(N_update, rhs, rhs, proc_config));
      printf("rhs = %e\n",alpha);
#ifdef SCALE_ME
	ML_MSR_scalerhs(rhs, scaling_vect, data_org[AZ_N_internal] +
                    data_org[AZ_N_border]);
	ML_MSR_scalesol(xxx, scaling_vect, data_org[AZ_N_internal] +
			data_org[AZ_N_border]);
#endif
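      /* Quick sanity scan of the local MSR matrix: val[i] (i < N_update)   */
      /* holds the diagonal of row i and bindx[i]..bindx[i+1]-1 indexes its */
      /* off-diagonal entries, so the loop below reports the extreme        */
      /* diagonal values and the largest absolute row sum.                  */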

      max_diag = 0.;
      min_diag = 1.e30;
      max_sum  = 0.;
      for (i = 0; i < N_update; i++) {
         if (Amat->val[i] < 0.) printf("whoops: negative diagonal A(%d,%d) = %e\n",
                                       i,i,Amat->val[i]);
         if (Amat->val[i] > max_diag) max_diag = Amat->val[i];
         if (Amat->val[i] < min_diag) min_diag = Amat->val[i];
         sum = fabs(Amat->val[i]);
         for (j = Amat->bindx[i]; j < Amat->bindx[i+1]; j++) {
            sum += fabs(Amat->val[j]);
         }
         if (sum > max_sum) max_sum = sum;
      }
      printf("Largest diagonal = %e, min diag = %e, largest abs row sum = %e\n",
             max_diag, min_diag, max_sum);

      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);

      options[AZ_pre_calc] = AZ_reuse;
      options[AZ_conv] = AZ_expected_values;
/*
      if (proc_config[AZ_node] == 0)
              printf("\n-------- Second solve with improved convergence test -----\n");
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);
      if (proc_config[AZ_node] == 0)
              printf("\n-------- Third solve with improved convergence test -----\n");
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);
*/
   }
   solve_time = AZ_second() - start_time;

   if (proc_config[AZ_node] == 0)
      printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time);
   if (proc_config[AZ_node] == 0)
     printf("Printing out a few entries of the solution ...\n");

   {
      int sample_gids[5] = {7, 23, 47, 101, 171};
      int s;

      for (s = 0; s < 5; s++) {
         for (j = 0; j < Amat->data_org[AZ_N_internal]+Amat->data_org[AZ_N_border]; j++)
            if (update[j] == sample_gids[s]) {
               printf("solution(gid = %d) = %10.4e\n",
                      update[j], xxx[update_index[j]]);
               fflush(stdout);
            }
         if (s < 4) j = AZ_gsum_int(7, proc_config); /* sync processors */
      }
   }


   ML_Aggregate_Destroy(&ag);
   ML_Destroy(&ml);
   AZ_free((void *) Amat->data_org);
   AZ_free((void *) Amat->val);
   AZ_free((void *) Amat->bindx);
   AZ_free((void *) update);
   AZ_free((void *) external);
   AZ_free((void *) extern_index);
   AZ_free((void *) update_index);
   AZ_scaling_destroy(&scaling);
   if (Amat  != NULL) AZ_matrix_destroy(&Amat);
   if (Pmat  != NULL) AZ_precond_destroy(&Pmat);
   free(xxx);
   free(rhs);


#ifdef HAVE_MPI
  MPI_Finalize();
#endif

  return 0;

}