int call_AZ_iterate(AZ_MATRIX* Amat, AZ_PRECOND* P, AZ_SCALING* S,
                    double* x, double* b, int* options, double* params,
                    double* status, int* proc_config,
                    int keep_info, int pre_calc, bool verbose)
{
  options[AZ_keep_info] = keep_info;
  options[AZ_pre_calc]  = pre_calc;

  std::string keepstr = (keep_info == 1)      ? "true"    : "false";
  std::string calcstr = (pre_calc == AZ_calc) ? "AZ_calc" : "AZ_reuse";

  if (verbose)
    cout << " solve with AZ_keep_info=" << keepstr
         << ", AZ_pre_calc=" << calcstr << endl;

  for (int i = 0; i < Amat->N_update; ++i) x[i] = 0.0;

  AZ_iterate(x, b, options, params, status, proc_config, Amat, P, S);
  return 0;
}
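/* The wrapper above packages Aztec's "keep and reuse the preconditioner"
   pattern.  A minimal usage sketch (not part of the original source; the
   function name is illustrative and Amat, Prec, Scal, x, b, options, params,
   status and proc_config are assumed to be set up as in the examples below):
   the first call builds the preconditioner and keeps it, the second call
   reuses it. */
void solve_twice_reusing_preconditioner(AZ_MATRIX* Amat, AZ_PRECOND* Prec,
                                        AZ_SCALING* Scal, double* x, double* b,
                                        int* options, double* params,
                                        double* status, int* proc_config)
{
  /* first solve: build the preconditioner and keep it around */
  call_AZ_iterate(Amat, Prec, Scal, x, b, options, params, status,
                  proc_config, /*keep_info=*/1, AZ_calc, true);
  /* second solve: reuse the preconditioner computed by the first solve */
  call_AZ_iterate(Amat, Prec, Scal, x, b, options, params, status,
                  proc_config, /*keep_info=*/1, AZ_reuse, true);
}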
int main(int argc, char *argv[]) { int Nnodes=16*16; /* Total number of nodes in the problem.*/ /* 'Nnodes' must be a perfect square. */ int MaxMgLevels=6; /* Maximum number of Multigrid Levels */ int Nits_per_presmooth=1; /* # of pre & post smoothings per level */ double tolerance = 1.0e-8; /* At convergence: */ /* ||r_k||_2 < tolerance ||r_0||_2 */ int smoothPe_flag = ML_YES; /* ML_YES: smooth tentative prolongator */ /* ML_NO: don't smooth prolongator */ /***************************************************************************/ /* Select Hiptmair relaxation subsmoothers for the nodal and edge problems */ /* Choices include */ /* 1) ML_Gen_Smoother_SymGaussSeidel: this corresponds to a processor */ /* local version of symmetric Gauss-Seidel/SOR. The number of sweeps */ /* can be set via either 'edge_its' or 'nodal_its'. The damping can */ /* be set via 'edge_omega' or 'nodal_omega'. When set to ML_DDEFAULT, */ /* the damping is set to '1' on one processor. On multiple processors */ /* a lower damping value is set. This is needed to converge processor */ /* local SOR. */ /* 2) ML_Gen_Smoother_Cheby: this corresponds to polynomial relaxation. */ /* The degree of the polynomial is set via 'edge_its' or 'nodal_its'. */ /* If the degree is '-1', Marian Brezina's MLS polynomial is chosen. */ /* Otherwise, a Chebyshev polynomial is used over high frequencies */ /* [ lambda_max/alpha , lambda_max]. Lambda_max is computed. 'alpha' */ /* is hardwired in this example to correspond to twice the ratio of */ /* unknowns in the fine and coarse meshes. */ /* */ /* Using 'hiptmair_type' (see comments below) it is also possible to choose*/ /* when edge and nodal problems are relaxed within the Hiptmair smoother. */ /* (A short SymGaussSeidel configuration sketch appears after this example.)*/ /***************************************************************************/ void *edge_smoother=(void *) /* Edge relaxation: */ ML_Gen_Smoother_Cheby; /* ML_Gen_Smoother_Cheby */ /* ML_Gen_Smoother_SymGaussSeidel */ void *nodal_smoother=(void *) /* Nodal relaxation */ ML_Gen_Smoother_Cheby;/* ML_Gen_Smoother_Cheby */ /* ML_Gen_Smoother_SymGaussSeidel */ int edge_its = 3; /* Iterations or polynomial degree for */ int nodal_its = 3; /* edge/nodal subsmoothers. */ double nodal_omega = ML_DDEFAULT, /* SOR damping parameter for nodal/edge */ edge_omega = ML_DDEFAULT; /* subsmoothers (see comments above). */ int hiptmair_type=HALF_HIPTMAIR;/* FULL_HIPTMAIR: each invocation */ /* smooths on edges, then nodes, */ /* and then once again on edges. */ /* HALF_HIPTMAIR: each pre-invocation */ /* smooths on edges, then nodes. */ /* Each post-invocation smooths */ /* on nodes then edges. */ ML_Operator *Tmat, *Tmat_trans, **Tmat_array, **Tmat_trans_array; ML *ml_edges, *ml_nodes; ML_Aggregate *ag; int Nfine_edge, Ncoarse_edge, Nfine_node, Ncoarse_node, Nlevels; int level, coarsest_level, itmp; double edge_coarsening_rate, node_coarsening_rate, *rhs, *xxx; void **edge_args, **nodal_args; struct user_partition Edge_Partition = {NULL, NULL,0,0}, Node_Partition = {NULL, NULL,0,0}; struct Tmat_data Tmat_data; int i, Ntotal; ML_Comm *comm; /* See Aztec User's Guide for information on these variables */ #ifdef AZTEC AZ_MATRIX *Ke_mat, *Kn_mat; AZ_PRECOND *Pmat = NULL; int proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE]; double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE]; #endif /* get processor information (proc id & # of procs) and set ML's printlevel.
*/ #ifdef ML_MPI MPI_Init(&argc,&argv); #endif #ifdef AZTEC AZ_set_proc_config(proc_config, COMMUNICATOR); #endif ML_Set_PrintLevel(10); /* set ML's output level: 0 gives least output */ /* Set the # of global nodes/edges and partition both the edges and the */ /* nodes over the processors. NOTE: I believe we assume that if an edge */ /* is assigned to a processor at least one of its nodes must be also */ /* assigned to that processor. */ Node_Partition.Nglobal = Nnodes; Edge_Partition.Nglobal = Node_Partition.Nglobal*2; Node_Partition.type = NODE; Edge_Partition.type = EDGE; #define perxodic #ifdef periodic Node_Partition.Nglobal += 2; #endif partition_edges(&Edge_Partition); partition_nodes(&Node_Partition); xxx = (double *) ML_allocate((Edge_Partition.Nlocal+100)*sizeof(double)); rhs = (double *) ML_allocate((Edge_Partition.Nlocal+100)*sizeof(double)); for (i = 0; i < Edge_Partition.Nlocal + 100; i++) xxx[i] = -1.; for (i = 0; i < Edge_Partition.Nlocal; i++) xxx[i] = (double) Edge_Partition.my_global_ids[i]; update_ghost_edges(xxx, (void *) &Edge_Partition); /* Create an empty multigrid hierarchy and set the 'MaxMGLevels-1'th */ /* level discretization within this hierarchy to the ML matrix */ /* representing Ke (Maxwell edge discretization). */ ML_Create(&ml_edges, MaxMgLevels); #ifdef AZTEC /* Build Ke as an Aztec matrix. Use built-in function AZ_ML_Set_Amat() */ /* to convert to an ML matrix and put in hierarchy. */ Ke_mat = user_Ke_build(&Edge_Partition); AZ_ML_Set_Amat(ml_edges, MaxMgLevels-1, Edge_Partition.Nlocal, Edge_Partition.Nlocal, Ke_mat, proc_config); #else /* Build Ke directly as an ML matrix. */ ML_Init_Amatrix (ml_edges, MaxMgLevels-1, Edge_Partition.Nlocal, Edge_Partition.Nlocal, &Edge_Partition); Ntotal = Edge_Partition.Nlocal; if (Edge_Partition.nprocs == 2) Ntotal += Edge_Partition.Nghost; ML_Set_Amatrix_Getrow(ml_edges, MaxMgLevels-1, Ke_getrow, update_ghost_edges, Ntotal); ML_Set_Amatrix_Matvec(ml_edges, MaxMgLevels-1, Ke_matvec); #endif /* Build an Aztec matrix representing an auxiliary nodal PDE problem. */ /* This should be a variable coefficient Poisson problem (with unknowns*/ /* at the nodes). The coefficients should be chosen to reflect the */ /* conductivity of the original edge problems. */ /* Create an empty multigrid hierarchy. Convert the Aztec matrix to an */ /* ML matrix and put it in the 'MaxMGLevels-1' level of the hierarchy. */ /* Note it is possible to multiply T'*T for get this matrix though this*/ /* will not incorporate material properties. */ ML_Create(&ml_nodes, MaxMgLevels); #ifdef AZTEC Kn_mat = user_Kn_build( &Node_Partition); AZ_ML_Set_Amat(ml_nodes, MaxMgLevels-1, Node_Partition.Nlocal, Node_Partition.Nlocal, Kn_mat, proc_config); #else ML_Init_Amatrix (ml_nodes, MaxMgLevels-1 , Node_Partition.Nlocal, Node_Partition.Nlocal, &Node_Partition); Ntotal = Node_Partition.Nlocal; if (Node_Partition.nprocs == 2) Ntotal += Node_Partition.Nghost; ML_Set_Amatrix_Getrow(ml_nodes, MaxMgLevels-1, Kn_getrow, update_ghost_nodes, Ntotal); #endif /* Build an ML matrix representing the null space of the PDE problem. */ /* This should be a discrete gradient (nodes to edges). 
*/ #ifdef AZTEC Tmat = user_T_build (&Edge_Partition, &Node_Partition, &(ml_nodes->Amat[MaxMgLevels-1])); #else Tmat = ML_Operator_Create(ml_nodes->comm); Tmat_data.edge = &Edge_Partition; Tmat_data.node = &Node_Partition; Tmat_data.Kn = &(ml_nodes->Amat[MaxMgLevels-1]); ML_Operator_Set_ApplyFuncData( Tmat, Node_Partition.Nlocal, Edge_Partition.Nlocal, ML_EMPTY, (void *) &Tmat_data, Edge_Partition.Nlocal, NULL, 0); ML_Operator_Set_Getrow( Tmat, ML_INTERNAL, Edge_Partition.Nlocal,Tmat_getrow); ML_Operator_Set_ApplyFunc(Tmat, ML_INTERNAL, Tmat_matvec); ML_Comm_Create( &comm); ML_CommInfoOP_Generate( &(Tmat->getrow->pre_comm), update_ghost_nodes, &Node_Partition,comm, Tmat->invec_leng, Node_Partition.Nghost); #endif /********************************************************************/ /* Set some ML parameters. */ /*------------------------------------------------------------------*/ ML_Set_ResidualOutputFrequency(ml_edges, 1); ML_Set_Tolerance(ml_edges, 1.0e-8); ML_Aggregate_Create( &ag ); ML_Aggregate_Set_CoarsenScheme_Uncoupled(ag); ML_Aggregate_Set_DampingFactor(ag, 0.0); /* must use 0 for maxwell */ ML_Aggregate_Set_MaxCoarseSize(ag, 30); ML_Aggregate_Set_Threshold(ag, 0.0); /********************************************************************/ /* Set up Tmat_trans */ /*------------------------------------------------------------------*/ Tmat_trans = ML_Operator_Create(ml_edges->comm); ML_Operator_Transpose_byrow(Tmat, Tmat_trans); Nlevels=ML_Gen_MGHierarchy_UsingReitzinger(ml_edges, &ml_nodes,MaxMgLevels-1, ML_DECREASING,ag,Tmat,Tmat_trans, &Tmat_array,&Tmat_trans_array, smoothPe_flag, 1.5); /* Set the Hiptmair subsmoothers */ if (nodal_smoother == (void *) ML_Gen_Smoother_SymGaussSeidel) { nodal_args = ML_Smoother_Arglist_Create(2); ML_Smoother_Arglist_Set(nodal_args, 0, &nodal_its); ML_Smoother_Arglist_Set(nodal_args, 1, &nodal_omega); } if (edge_smoother == (void *) ML_Gen_Smoother_SymGaussSeidel) { edge_args = ML_Smoother_Arglist_Create(2); ML_Smoother_Arglist_Set(edge_args, 0, &edge_its); ML_Smoother_Arglist_Set(edge_args, 1, &edge_omega); } if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) { nodal_args = ML_Smoother_Arglist_Create(2); ML_Smoother_Arglist_Set(nodal_args, 0, &nodal_its); Nfine_node = Tmat_array[MaxMgLevels-1]->invec_leng; Nfine_node = ML_gsum_int(Nfine_node, ml_edges->comm); } if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) { edge_args = ML_Smoother_Arglist_Create(2); ML_Smoother_Arglist_Set(edge_args, 0, &edge_its); Nfine_edge = Tmat_array[MaxMgLevels-1]->outvec_leng; Nfine_edge = ML_gsum_int(Nfine_edge, ml_edges->comm); } /**************************************************** * Set up smoothers for all levels but the coarsest. 
* ****************************************************/ coarsest_level = MaxMgLevels - Nlevels; for (level = MaxMgLevels-1; level > coarsest_level; level--) { if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) { Ncoarse_edge = Tmat_array[level-1]->outvec_leng; Ncoarse_edge = ML_gsum_int(Ncoarse_edge, ml_edges->comm); edge_coarsening_rate = 2.*((double) Nfine_edge)/ ((double) Ncoarse_edge); ML_Smoother_Arglist_Set(edge_args, 1, &edge_coarsening_rate); Nfine_edge = Ncoarse_edge; } if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) { Ncoarse_node = Tmat_array[level-1]->invec_leng; Ncoarse_node = ML_gsum_int(Ncoarse_node, ml_edges->comm); node_coarsening_rate = 2.*((double) Nfine_node)/ ((double) Ncoarse_node); ML_Smoother_Arglist_Set(nodal_args, 1, &node_coarsening_rate); Nfine_node = Ncoarse_node; } ML_Gen_Smoother_Hiptmair(ml_edges, level, ML_BOTH, Nits_per_presmooth, Tmat_array, Tmat_trans_array, NULL, edge_smoother, edge_args, nodal_smoother,nodal_args, hiptmair_type); } /******************************************* * Set up coarsest level smoother *******************************************/ if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) { edge_coarsening_rate = (double) Nfine_edge; ML_Smoother_Arglist_Set(edge_args, 1, &edge_coarsening_rate); } if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) { node_coarsening_rate = (double) Nfine_node; ML_Smoother_Arglist_Set(nodal_args,1,&node_coarsening_rate); } ML_Gen_CoarseSolverSuperLU( ml_edges, coarsest_level); /* Must be called before invoking the preconditioner */ ML_Gen_Solver(ml_edges, ML_MGV, MaxMgLevels-1, coarsest_level); /* Set the initial guess and the right hand side. Invoke solver */ xxx = (double *) ML_allocate(Edge_Partition.Nlocal*sizeof(double)); ML_random_vec(xxx, Edge_Partition.Nlocal, ml_edges->comm); rhs = (double *) ML_allocate(Edge_Partition.Nlocal*sizeof(double)); ML_random_vec(rhs, Edge_Partition.Nlocal, ml_edges->comm); #ifdef AZTEC /* Choose the Aztec solver and criteria. Also tell Aztec that */ /* ML will be supplying the preconditioner. */ AZ_defaults(options, params); options[AZ_solver] = AZ_fixed_pt; options[AZ_solver] = AZ_gmres; options[AZ_kspace] = 80; params[AZ_tol] = tolerance; AZ_set_ML_preconditioner(&Pmat, Ke_mat, ml_edges, options); options[AZ_conv] = AZ_noscaled; AZ_iterate(xxx, rhs, options, params, status, proc_config, Ke_mat, Pmat, NULL); #else ML_Iterate(ml_edges, xxx, rhs); #endif /* clean up. */ ML_Smoother_Arglist_Delete(&nodal_args); ML_Smoother_Arglist_Delete(&edge_args); ML_Aggregate_Destroy(&ag); ML_Destroy(&ml_edges); ML_Destroy(&ml_nodes); #ifdef AZTEC AZ_free((void *) Ke_mat->data_org); AZ_free((void *) Ke_mat->val); AZ_free((void *) Ke_mat->bindx); if (Ke_mat != NULL) AZ_matrix_destroy(&Ke_mat); if (Pmat != NULL) AZ_precond_destroy(&Pmat); if (Kn_mat != NULL) AZ_matrix_destroy(&Kn_mat); #endif free(xxx); free(rhs); ML_Operator_Destroy(&Tmat); ML_Operator_Destroy(&Tmat_trans); ML_MGHierarchy_ReitzingerDestroy(MaxMgLevels-2, &Tmat_array, &Tmat_trans_array); #ifdef ML_MPI MPI_Finalize(); #endif return 0; }
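/* A minimal sketch (not part of the original example) of the alternative
   Hiptmair subsmoother configuration described in the comment block at the
   top of the example above: processor-local symmetric Gauss-Seidel for both
   the edge and the nodal subproblems instead of Chebyshev.  The iteration
   count and damping are illustrative; ml_edges, Tmat_array and
   Tmat_trans_array are assumed to come from the Reitzinger hierarchy built
   above, and the function name is hypothetical. */
static void set_hiptmair_sgs_subsmoothers(ML *ml_edges, int level,
                                          ML_Operator **Tmat_array,
                                          ML_Operator **Tmat_trans_array)
{
  void  *edge_sm  = (void *) ML_Gen_Smoother_SymGaussSeidel;
  void  *nodal_sm = (void *) ML_Gen_Smoother_SymGaussSeidel;
  int    its      = 2;            /* sweeps per subsmoother application */
  double omega    = ML_DDEFAULT;  /* let ML choose the SOR damping      */

  void **edge_args  = ML_Smoother_Arglist_Create(2);
  void **nodal_args = ML_Smoother_Arglist_Create(2);
  ML_Smoother_Arglist_Set(edge_args,  0, &its);
  ML_Smoother_Arglist_Set(edge_args,  1, &omega);
  ML_Smoother_Arglist_Set(nodal_args, 0, &its);
  ML_Smoother_Arglist_Set(nodal_args, 1, &omega);

  ML_Gen_Smoother_Hiptmair(ml_edges, level, ML_BOTH, 1,
                           Tmat_array, Tmat_trans_array, NULL,
                           edge_sm, edge_args, nodal_sm, nodal_args,
                           HALF_HIPTMAIR);
}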
int main(int argc, char *argv[]) { int num_PDE_eqns=1, N_levels=3, nsmooth=2; int leng, level, N_grid_pts, coarsest_level; int leng1,leng2; /* See Aztec User's Guide for more information on the */ /* variables that follow. */ int proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE]; double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE]; /* data structure for matrix corresponding to the fine grid */ double *val = NULL, *xxx, *rhs, solve_time, setup_time, start_time; AZ_MATRIX *Amat; AZ_PRECOND *Pmat = NULL; ML *ml; FILE *fp; int i, j, Nrigid, *garbage, nblocks=0, *blocks = NULL, *block_pde=NULL; struct AZ_SCALING *scaling; ML_Aggregate *ag; double *mode, *rigid=NULL, alpha; char filename[80]; int one = 1; int proc,nprocs; char pathfilename[100]; #ifdef ML_MPI MPI_Init(&argc,&argv); /* get number of processors and the name of this processor */ AZ_set_proc_config(proc_config, MPI_COMM_WORLD); proc = proc_config[AZ_node]; nprocs = proc_config[AZ_N_procs]; #else AZ_set_proc_config(proc_config, AZ_NOT_MPI); proc = 0; nprocs = 1; #endif if (proc_config[AZ_node] == 0) { sprintf(pathfilename,"%s/inputfile",argv[1]); ML_Reader_ReadInput(pathfilename, &context); } else context = (struct reader_context *) ML_allocate(sizeof(struct reader_context)); AZ_broadcast((char *) context, sizeof(struct reader_context), proc_config, AZ_PACK); AZ_broadcast((char *) NULL , 0 , proc_config, AZ_SEND); N_levels = context->N_levels; printf("N_levels %d\n",N_levels); nsmooth = context->nsmooth; num_PDE_eqns = context->N_dofPerNode; printf("num_PDE_eqns %d\n",num_PDE_eqns); ML_Set_PrintLevel(context->output_level); /* read in the number of matrix equations */ leng = 0; if (proc_config[AZ_node] == 0) { sprintf(pathfilename,"%s/data_matrix.txt",argv[1]); fp=fopen(pathfilename,"r"); if (fp==NULL) { printf("**ERR** couldn't open file data_matrix.txt\n"); exit(1); } fscanf(fp,"%d",&leng); fclose(fp); } leng = AZ_gsum_int(leng, proc_config); N_grid_pts=leng/num_PDE_eqns; /* initialize the list of global indices. NOTE: the list of global */ /* indices must be in ascending order so that subsequent calls to */ /* AZ_find_index() will function properly. */ #if 0 if (proc_config[AZ_N_procs] == 1) i = AZ_linear; else i = AZ_file; #endif i = AZ_linear; /* cannot use AZ_input_update for variable blocks (forgot why, but debugged through it)*/ /* make a linear distribution of the matrix */ /* if the linear distribution does not align with the blocks, */ /* this is corrected in ML_AZ_Reader_ReadVariableBlocks */ leng1 = leng/nprocs; leng2 = leng-leng1*nprocs; if (proc >= leng2) { leng2 += (proc*leng1); } else { leng1++; leng2 = proc*leng1; } N_update = leng1; update = (int*)AZ_allocate((N_update+1)*sizeof(int)); if (update==NULL) { (void) fprintf (stderr, "Not enough space to allocate 'update'\n"); fflush(stderr); exit(EXIT_FAILURE); } for (i=0; i<N_update; i++) update[i] = i+leng2; #if 0 /* debug */ printf("proc %d N_update %d\n",proc_config[AZ_node],N_update); fflush(stdout); #endif sprintf(pathfilename,"%s/data_vblocks.txt",argv[1]); ML_AZ_Reader_ReadVariableBlocks(pathfilename,&nblocks,&blocks,&block_pde, &N_update,&update,proc_config); #if 0 /* debug */ printf("proc %d N_update %d\n",proc_config[AZ_node],N_update); fflush(stdout); #endif sprintf(pathfilename,"%s/data_matrix.txt",argv[1]); AZ_input_msr_matrix(pathfilename,update, &val, &bindx, N_update, proc_config); /* This code is to fix things up so that we are sure we have */ /* all blocks (including the ghost nodes) the same size. 
*/ /* not sure, whether this is a good idea with variable blocks */ /* the examples inpufiles (see top of this file) don't need it */ /* anyway */ /* AZ_block_MSR(&bindx, &val, N_update, num_PDE_eqns, update); */ AZ_transform_norowreordering(proc_config, &external, bindx, val, update, &update_index, &extern_index, &data_org, N_update, 0, 0, 0, &cpntr, AZ_MSR_MATRIX); Amat = AZ_matrix_create( leng ); AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL); Amat->matrix_type = data_org[AZ_matrix_type]; data_org[AZ_N_rows] = data_org[AZ_N_internal] + data_org[AZ_N_border]; start_time = AZ_second(); options[AZ_scaling] = AZ_none; ML_Create(&ml, N_levels); /* set up discretization matrix and matrix vector function */ AZ_ML_Set_Amat(ml, 0, N_update, N_update, Amat, proc_config); ML_Set_ResidualOutputFrequency(ml, context->output); ML_Set_Tolerance(ml, context->tol); ML_Aggregate_Create( &ag ); if (ML_strcmp(context->agg_coarsen_scheme,"Mis") == 0) { ML_Aggregate_Set_CoarsenScheme_MIS(ag); } else if (ML_strcmp(context->agg_coarsen_scheme,"Uncoupled") == 0) { ML_Aggregate_Set_CoarsenScheme_Uncoupled(ag); } else if (ML_strcmp(context->agg_coarsen_scheme,"Coupled") == 0) { ML_Aggregate_Set_CoarsenScheme_Coupled(ag); } else if (ML_strcmp(context->agg_coarsen_scheme,"Metis") == 0) { ML_Aggregate_Set_CoarsenScheme_METIS(ag); for (i=0; i<N_levels; i++) ML_Aggregate_Set_NodesPerAggr(ml,ag,i,9); } else if (ML_strcmp(context->agg_coarsen_scheme,"VBMetis") == 0) { /* when no blocks read, use standard metis assuming constant block sizes */ if (!blocks) ML_Aggregate_Set_CoarsenScheme_METIS(ag); else { ML_Aggregate_Set_CoarsenScheme_VBMETIS(ag); ML_Aggregate_Set_Vblocks_CoarsenScheme_VBMETIS(ag,0,N_levels,nblocks, blocks,block_pde,N_update); } for (i=0; i<N_levels; i++) ML_Aggregate_Set_NodesPerAggr(ml,ag,i,9); } else { printf("**ERR** ML: Unknown aggregation scheme %s\n",context->agg_coarsen_scheme); exit(-1); } ML_Aggregate_Set_DampingFactor(ag, context->agg_damping); ML_Aggregate_Set_MaxCoarseSize( ag, context->maxcoarsesize); ML_Aggregate_Set_Threshold(ag, context->agg_thresh); if (ML_strcmp(context->agg_spectral_norm,"Calc") == 0) { ML_Set_SpectralNormScheme_Calc(ml); } else if (ML_strcmp(context->agg_spectral_norm,"Anorm") == 0) { ML_Set_SpectralNormScheme_Anorm(ml); } else { printf("**WRN** ML: Unknown spectral norm scheme %s\n",context->agg_spectral_norm); } /* read in the rigid body modes */ Nrigid = 0; if (proc_config[AZ_node] == 0) { sprintf(filename,"data_nullsp%d.txt",Nrigid); sprintf(pathfilename,"%s/%s",argv[1],filename); while( (fp = fopen(pathfilename,"r")) != NULL) { fclose(fp); Nrigid++; sprintf(filename,"data_nullsp%d.txt",Nrigid); sprintf(pathfilename,"%s/%s",argv[1],filename); } } Nrigid = AZ_gsum_int(Nrigid,proc_config); if (Nrigid != 0) { rigid = (double *) ML_allocate( sizeof(double)*Nrigid*(N_update+1) ); if (rigid == NULL) { printf("Error: Not enough space for rigid body modes\n"); } } /* Set rhs */ sprintf(pathfilename,"%s/data_rhs.txt",argv[1]); fp = fopen(pathfilename,"r"); if (fp == NULL) { rhs=(double *)ML_allocate(leng*sizeof(double)); if (proc_config[AZ_node] == 0) printf("taking linear vector for rhs\n"); for (i = 0; i < N_update; i++) rhs[i] = (double) update[i]; } else { fclose(fp); if (proc_config[AZ_node] == 0) printf("reading rhs from a file\n"); AZ_input_msr_matrix(pathfilename, update, &rhs, &garbage, N_update, proc_config); } AZ_reorder_vec(rhs, data_org, update_index, NULL); for (i = 0; i < Nrigid; i++) { sprintf(filename,"data_nullsp%d.txt",i); 
sprintf(pathfilename,"%s/%s",argv[1],filename); AZ_input_msr_matrix(pathfilename, update, &mode, &garbage, N_update, proc_config); AZ_reorder_vec(mode, data_org, update_index, NULL); #if 0 /* test the given rigid body mode, output-vector should be ~0 */ Amat->matvec(mode, rigid, Amat, proc_config); for (j = 0; j < N_update; j++) printf("this is %d %e\n",j,rigid[j]); #endif for (j = 0; j < i; j++) { alpha = -AZ_gdot(N_update, mode, &(rigid[j*N_update]), proc_config)/ AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]), proc_config); DAXPY_F77(&N_update, &alpha, &(rigid[j*N_update]), &one, mode, &one); } /* rhs orthogonalization */ alpha = -AZ_gdot(N_update, mode, rhs, proc_config)/ AZ_gdot(N_update, mode, mode, proc_config); DAXPY_F77(&N_update, &alpha, mode, &one, rhs, &one); for (j = 0; j < N_update; j++) rigid[i*N_update+j] = mode[j]; free(mode); free(garbage); } for (j = 0; j < Nrigid; j++) { alpha = -AZ_gdot(N_update, rhs, &(rigid[j*N_update]), proc_config)/ AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]), proc_config); DAXPY_F77(&N_update, &alpha, &(rigid[j*N_update]), &one, rhs, &one); } #if 0 /* for testing the default nullsp */ ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, 6, NULL, N_update); #else if (Nrigid != 0) { ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, Nrigid, rigid, N_update); } #endif if (rigid) ML_free(rigid); ag->keep_agg_information = 1; coarsest_level = ML_Gen_MGHierarchy_UsingAggregation(ml, 0, ML_INCREASING, ag); coarsest_level--; if ( proc_config[AZ_node] == 0 ) printf("Coarse level = %d \n", coarsest_level); #if 0 /* set up smoothers */ if (!blocks) blocks = (int *) ML_allocate(sizeof(int)*N_update); #endif for (level = 0; level < coarsest_level; level++) { num_PDE_eqns = ml->Amat[level].num_PDEs; /* Sparse approximate inverse smoother that acutally does both */ /* pre and post smoothing. */ if (ML_strcmp(context->smoother,"Parasails") == 0) { ML_Gen_Smoother_ParaSails(ml , level, ML_PRESMOOTHER, nsmooth, parasails_sym, parasails_thresh, parasails_nlevels, parasails_filter, (int) parasails_loadbal, parasails_factorized); } /* This is the symmetric Gauss-Seidel smoothing that we usually use. */ /* In parallel, it is not a true Gauss-Seidel in that each processor */ /* does a Gauss-Seidel on its local submatrix independent of the */ /* other processors. */ else if (ML_strcmp(context->smoother,"GaussSeidel") == 0) { ML_Gen_Smoother_GaussSeidel(ml , level, ML_BOTH, nsmooth,1.); } else if (ML_strcmp(context->smoother,"SymGaussSeidel") == 0) { ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_BOTH, nsmooth,1.); } else if (ML_strcmp(context->smoother,"Poly") == 0) { ML_Gen_Smoother_Cheby(ml, level, ML_BOTH, 30., nsmooth); } else if (ML_strcmp(context->smoother,"BlockGaussSeidel") == 0) { ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_BOTH, nsmooth,1., num_PDE_eqns); } else if (ML_strcmp(context->smoother,"VBSymGaussSeidel") == 0) { if (blocks) ML_free(blocks); if (block_pde) ML_free(block_pde); blocks = NULL; block_pde = NULL; nblocks = 0; ML_Aggregate_Get_Vblocks_CoarsenScheme_VBMETIS(ag,level,N_levels,&nblocks, &blocks,&block_pde); if (blocks==NULL) ML_Gen_Blocks_Aggregates(ag, level, &nblocks, &blocks); ML_Gen_Smoother_VBlockSymGaussSeidel(ml , level, ML_BOTH, nsmooth,1., nblocks, blocks); } /* This is a true Gauss Seidel in parallel. This seems to work for */ /* elasticity problems. However, I don't believe that this is very */ /* efficient in parallel. 
*/ /* nblocks = ml->Amat[level].invec_leng; for (i =0; i < nblocks; i++) blocks[i] = i; ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml , level, ML_PRESMOOTHER, nsmooth, 1., nblocks, blocks); ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml, level, ML_POSTSMOOTHER, nsmooth, 1., nblocks, blocks); */ /* Jacobi Smoothing */ else if (ML_strcmp(context->smoother,"Jacobi") == 0) { ML_Gen_Smoother_Jacobi(ml , level, ML_PRESMOOTHER, nsmooth,.4); ML_Gen_Smoother_Jacobi(ml , level, ML_POSTSMOOTHER, nsmooth,.4); } /* This does a block Gauss-Seidel (not true GS in parallel) */ /* where each processor has 'nblocks' blocks. */ /* */ else if (ML_strcmp(context->smoother,"Metis") == 0) { if (blocks) ML_free(blocks); if (block_pde) ML_free(block_pde); nblocks = 250; ML_Gen_Blocks_Metis(ml, level, &nblocks, &blocks); ML_Gen_Smoother_VBlockSymGaussSeidel(ml , level, ML_BOTH, nsmooth,1., nblocks, blocks); } else { printf("unknown smoother %s\n",context->smoother); exit(1); } } /* set coarse level solver */ nsmooth = context->coarse_its; /* Sparse approximate inverse smoother that acutally does both */ /* pre and post smoothing. */ if (ML_strcmp(context->coarse_solve,"Parasails") == 0) { ML_Gen_Smoother_ParaSails(ml , coarsest_level, ML_PRESMOOTHER, nsmooth, parasails_sym, parasails_thresh, parasails_nlevels, parasails_filter, (int) parasails_loadbal, parasails_factorized); } else if (ML_strcmp(context->coarse_solve,"GaussSeidel") == 0) { ML_Gen_Smoother_GaussSeidel(ml , coarsest_level, ML_BOTH, nsmooth,1.); } else if (ML_strcmp(context->coarse_solve,"Poly") == 0) { ML_Gen_Smoother_Cheby(ml, coarsest_level, ML_BOTH, 30., nsmooth); } else if (ML_strcmp(context->coarse_solve,"SymGaussSeidel") == 0) { ML_Gen_Smoother_SymGaussSeidel(ml , coarsest_level, ML_BOTH, nsmooth,1.); } else if (ML_strcmp(context->coarse_solve,"BlockGaussSeidel") == 0) { ML_Gen_Smoother_BlockGaussSeidel(ml, coarsest_level, ML_BOTH, nsmooth,1., num_PDE_eqns); } else if (ML_strcmp(context->coarse_solve,"Aggregate") == 0) { if (blocks) ML_free(blocks); if (block_pde) ML_free(block_pde); ML_Gen_Blocks_Aggregates(ag, coarsest_level, &nblocks, &blocks); ML_Gen_Smoother_VBlockSymGaussSeidel(ml , coarsest_level, ML_BOTH, nsmooth,1., nblocks, blocks); } else if (ML_strcmp(context->coarse_solve,"Jacobi") == 0) { ML_Gen_Smoother_Jacobi(ml , coarsest_level, ML_BOTH, nsmooth,.5); } else if (ML_strcmp(context->coarse_solve,"Metis") == 0) { if (blocks) ML_free(blocks); if (block_pde) ML_free(block_pde); nblocks = 250; ML_Gen_Blocks_Metis(ml, coarsest_level, &nblocks, &blocks); ML_Gen_Smoother_VBlockSymGaussSeidel(ml , coarsest_level, ML_BOTH, nsmooth,1., nblocks, blocks); } else if (ML_strcmp(context->coarse_solve,"SuperLU") == 0) { ML_Gen_CoarseSolverSuperLU( ml, coarsest_level); } else if (ML_strcmp(context->coarse_solve,"Amesos") == 0) { ML_Gen_Smoother_Amesos(ml,coarsest_level,ML_AMESOS_KLU,-1, 0.0); } else { printf("unknown coarse grid solver %s\n",context->coarse_solve); exit(1); } ML_Gen_Solver(ml, ML_MGV, 0, coarsest_level); AZ_defaults(options, params); if (ML_strcmp(context->krylov,"Cg") == 0) { options[AZ_solver] = AZ_cg; } else if (ML_strcmp(context->krylov,"Bicgstab") == 0) { options[AZ_solver] = AZ_bicgstab; } else if (ML_strcmp(context->krylov,"Tfqmr") == 0) { options[AZ_solver] = AZ_tfqmr; } else if (ML_strcmp(context->krylov,"Gmres") == 0) { options[AZ_solver] = AZ_gmres; } else { printf("unknown krylov method %s\n",context->krylov); } if (blocks) ML_free(blocks); if (block_pde) ML_free(block_pde); options[AZ_scaling] = AZ_none; 
options[AZ_precond] = AZ_user_precond; options[AZ_conv] = AZ_r0; options[AZ_output] = 1; options[AZ_max_iter] = context->max_outer_its; options[AZ_poly_ord] = 5; options[AZ_kspace] = 130; params[AZ_tol] = context->tol; options[AZ_output] = context->output; ML_free(context); AZ_set_ML_preconditioner(&Pmat, Amat, ml, options); setup_time = AZ_second() - start_time; xxx = (double *) malloc( leng*sizeof(double)); for (iii = 0; iii < leng; iii++) xxx[iii] = 0.0; /* Set x */ /* there is no initguess supplied with these examples for the moment.... */ fp = fopen("initguessfile","r"); if (fp != NULL) { fclose(fp); if (proc_config[AZ_node]== 0) printf("reading initial guess from file\n"); AZ_input_msr_matrix("data_initguess.txt", update, &xxx, &garbage, N_update, proc_config); options[AZ_conv] = AZ_expected_values; } else if (proc_config[AZ_node]== 0) printf("taking 0 initial guess \n"); AZ_reorder_vec(xxx, data_org, update_index, NULL); /* if Dirichlet BC ... put the answer in */ for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) { if ( (val[i] > .99999999) && (val[i] < 1.0000001)) xxx[i] = rhs[i]; } fp = fopen("AZ_no_multilevel.dat","r"); scaling = AZ_scaling_create(); start_time = AZ_second(); if (fp != NULL) { fclose(fp); options[AZ_precond] = AZ_none; options[AZ_scaling] = AZ_sym_diag; options[AZ_ignore_scaling] = AZ_TRUE; options[AZ_keep_info] = 1; AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); /* options[AZ_pre_calc] = AZ_reuse; options[AZ_conv] = AZ_expected_values; if (proc_config[AZ_node] == 0) printf("\n-------- Second solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); if (proc_config[AZ_node] == 0) printf("\n-------- Third solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); */ } else { options[AZ_keep_info] = 1; AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); options[AZ_pre_calc] = AZ_reuse; options[AZ_conv] = AZ_expected_values; /* if (proc_config[AZ_node] == 0) printf("\n-------- Second solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); if (proc_config[AZ_node] == 0) printf("\n-------- Third solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); */ } solve_time = AZ_second() - start_time; if (proc_config[AZ_node] == 0) printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time); if (proc_config[AZ_node] == 0) printf("Printing out a few entries of the solution ...\n"); for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 7) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 23) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 47) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 101) 
{printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 171) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} ML_Aggregate_Destroy(&ag); ML_Destroy(&ml); AZ_free((void *) Amat->data_org); AZ_free((void *) Amat->val); AZ_free((void *) Amat->bindx); AZ_free((void *) update); AZ_free((void *) external); AZ_free((void *) extern_index); AZ_free((void *) update_index); AZ_scaling_destroy(&scaling); if (Amat != NULL) AZ_matrix_destroy(&Amat); if (Pmat != NULL) AZ_precond_destroy(&Pmat); free(xxx); free(rhs); #ifdef ML_MPI MPI_Finalize(); #endif return 0; }
int main(int argc, char *argv[]) { int num_PDE_eqns=5, N_levels=3; /* int nsmooth=1; */ int leng, level, N_grid_pts, coarsest_level; /* See Aztec User's Guide for more information on the */ /* variables that follow. */ int proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE]; double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE]; /* data structure for matrix corresponding to the fine grid */ int *data_org = NULL, *update = NULL, *external = NULL; int *update_index = NULL, *extern_index = NULL; int *cpntr = NULL; int *bindx = NULL, N_update, iii; double *val = NULL; double *xxx, *rhs; AZ_MATRIX *Amat; AZ_PRECOND *Pmat = NULL; ML *ml; FILE *fp; int ch,i; struct AZ_SCALING *scaling; double solve_time, setup_time, start_time; ML_Aggregate *ag; int *ivec; #ifdef VBR_VERSION ML_Operator *B, *C, *D; int *vbr_cnptr, *vbr_rnptr, *vbr_indx, *vbr_bindx, *vbr_bnptr, total_blk_rows; int total_blk_cols, blk_space, nz_space; double *vbr_val; struct ML_CSR_MSRdata *csr_data; #endif #ifdef ML_MPI MPI_Init(&argc,&argv); /* get number of processors and the name of this processor */ AZ_set_proc_config(proc_config, MPI_COMM_WORLD); #else AZ_set_proc_config(proc_config, AZ_NOT_MPI); #endif #ifdef binary fp=fopen(".data","rb"); #else fp=fopen(".data","r"); #endif if (fp==NULL) { printf("couldn't open file .data\n"); exit(1); } #ifdef binary fread(&leng, sizeof(int), 1, fp); #else fscanf(fp,"%d",&leng); #endif fclose(fp); N_grid_pts=leng/num_PDE_eqns; /* initialize the list of global indices. NOTE: the list of global */ /* indices must be in ascending order so that subsequent calls to */ /* AZ_find_index() will function properly. */ AZ_read_update(&N_update, &update, proc_config, N_grid_pts, num_PDE_eqns, AZ_linear); AZ_read_msr_matrix(update, &val, &bindx, N_update, proc_config); /* This code is to fix things up so that we are sure we have */ /* all block (including the ghost nodes the same size. 
*/ AZ_block_MSR(&bindx, &val, N_update, num_PDE_eqns, update); AZ_transform(proc_config, &external, bindx, val, update, &update_index, &extern_index, &data_org, N_update, 0, 0, 0, &cpntr, AZ_MSR_MATRIX); Amat = AZ_matrix_create( leng ); #ifndef VBR_VERSION AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL); Amat->matrix_type = data_org[AZ_matrix_type]; data_org[AZ_N_rows] = data_org[AZ_N_internal] + data_org[AZ_N_border]; #else total_blk_rows = N_update/num_PDE_eqns; total_blk_cols = total_blk_rows; blk_space = total_blk_rows*20; nz_space = blk_space*num_PDE_eqns*num_PDE_eqns; vbr_cnptr = (int *) ML_allocate(sizeof(int )*(total_blk_cols+1)); vbr_rnptr = (int *) ML_allocate(sizeof(int )*(total_blk_cols+1)); vbr_bnptr = (int *) ML_allocate(sizeof(int )*(total_blk_cols+2)); vbr_indx = (int *) ML_allocate(sizeof(int )*(blk_space+1)); vbr_bindx = (int *) ML_allocate(sizeof(int )*(blk_space+1)); vbr_val = (double *) ML_allocate(sizeof(double)*(nz_space+1)); for (i = 0; i <= total_blk_cols; i++) vbr_cnptr[i] = num_PDE_eqns; AZ_msr2vbr(vbr_val, vbr_indx, vbr_rnptr, vbr_cnptr, vbr_bnptr, vbr_bindx, bindx, val, total_blk_rows, total_blk_cols, blk_space, nz_space, -1); data_org[AZ_N_rows] = data_org[AZ_N_internal] + data_org[AZ_N_border]; data_org[AZ_N_int_blk] = data_org[AZ_N_internal]/num_PDE_eqns; data_org[AZ_N_bord_blk] = data_org[AZ_N_bord_blk]/num_PDE_eqns; data_org[AZ_N_ext_blk] = data_org[AZ_N_ext_blk]/num_PDE_eqns; data_org[AZ_matrix_type] = AZ_VBR_MATRIX; AZ_set_VBR(Amat, vbr_rnptr, vbr_cnptr, vbr_bnptr, vbr_indx, vbr_bindx, vbr_val, data_org, 0, NULL, AZ_LOCAL); Amat->matrix_type = data_org[AZ_matrix_type]; #endif start_time = AZ_second(); ML_Create(&ml, N_levels); ML_Set_PrintLevel(3); /* set up discretization matrix and matrix vector function */ AZ_ML_Set_Amat(ml, N_levels-1, N_update, N_update, Amat, proc_config); ML_Aggregate_Create( &ag ); ML_Aggregate_Set_Threshold(ag,0.0); ML_Set_SpectralNormScheme_PowerMethod(ml); /* To run SA: a) set damping factor to 1 and use power method ML_Aggregate_Set_DampingFactor(ag, 4./3.); To run NSA: a) set damping factor to 0 ML_Aggregate_Set_DampingFactor(ag, 0.); To run NSR a) set damping factor to 1 and use power method ML_Aggregate_Set_DampingFactor(ag, 1.); ag->Restriction_smoothagg_transpose = ML_FALSE; ag->keep_agg_information=1; ag->keep_P_tentative=1; b) hack code so it calls the energy minimizing restriction line 2973 of ml_agg_genP.c c) turn on the NSR flag in ml_agg_energy_min.cpp To run Emin a) set min_eneryg = 2 and keep_agg_info = 1; ag->minimizing_energy=2; ag->keep_agg_information=1; ag->cheap_minimizing_energy = 0; ag->block_scaled_SA = 1; */ ag->minimizing_energy=2; ag->keep_agg_information=1; ag->block_scaled_SA = 1; ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, num_PDE_eqns, NULL, N_update); ML_Aggregate_Set_MaxCoarseSize( ag, 20); /* ML_Aggregate_Set_RandomOrdering( ag ); ML_Aggregate_Set_DampingFactor(ag, .1); ag->drop_tol_for_smoothing = 1.0e-3; ML_Aggregate_Set_Threshold(ag, 1.0e-3); ML_Aggregate_Set_MaxCoarseSize( ag, 300); */ coarsest_level = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml, N_levels-1, ML_DECREASING, ag); coarsest_level = N_levels - coarsest_level; if ( proc_config[AZ_node] == 0 ) printf("Coarse level = %d \n", coarsest_level); /* set up smoothers */ AZ_defaults(options, params); for (level = N_levels-1; level > coarsest_level; level--) { /* This is the Aztec domain decomp/ilu smoother that we */ /* usually use for this problem. 
*/ /* options[AZ_precond] = AZ_dom_decomp; options[AZ_subdomain_solve] = AZ_ilut; params[AZ_ilut_fill] = 1.0; options[AZ_reorder] = 1; ML_Gen_SmootherAztec(ml, level, options, params, proc_config, status, AZ_ONLY_PRECONDITIONER, ML_PRESMOOTHER,NULL); */ /* Sparse approximate inverse smoother that acutally does both */ /* pre and post smoothing. */ /* ML_Gen_Smoother_ParaSails(ml , level, ML_PRESMOOTHER, nsmooth, parasails_sym, parasails_thresh, parasails_nlevels, parasails_filter, parasails_loadbal, parasails_factorized); parasails_thresh /= 4.; */ /* This is the symmetric Gauss-Seidel smoothing. In parallel, */ /* it is not a true Gauss-Seidel in that each processor */ /* does a Gauss-Seidel on its local submatrix independent of the */ /* other processors. */ /* ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_PRESMOOTHER, nsmooth,1.); ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_POSTSMOOTHER,nsmooth,1.); */ /* Block Gauss-Seidel with block size equal to #DOF per node. */ /* Not a true Gauss-Seidel in that each processor does a */ /* Gauss-Seidel on its local submatrix independent of the other */ /* processors. */ /* ML_Gen_Smoother_BlockGaussSeidel(ml,level,ML_PRESMOOTHER, nsmooth,0.67, num_PDE_eqns); ML_Gen_Smoother_BlockGaussSeidel(ml,level,ML_POSTSMOOTHER, nsmooth, 0.67, num_PDE_eqns); */ ML_Gen_Smoother_SymBlockGaussSeidel(ml,level,ML_POSTSMOOTHER, 1, 1.0, num_PDE_eqns); } ML_Gen_CoarseSolverSuperLU( ml, coarsest_level); ML_Gen_Solver(ml, ML_MGW, N_levels-1, coarsest_level); AZ_defaults(options, params); options[AZ_solver] = AZ_gmres; options[AZ_scaling] = AZ_none; options[AZ_precond] = AZ_user_precond; /* options[AZ_conv] = AZ_r0; */ options[AZ_output] = 1; options[AZ_max_iter] = 1500; options[AZ_poly_ord] = 5; options[AZ_kspace] = 130; params[AZ_tol] = 1.0e-8; /* options[AZ_precond] = AZ_dom_decomp; options[AZ_subdomain_solve] = AZ_ilut; params[AZ_ilut_fill] = 2.0; */ AZ_set_ML_preconditioner(&Pmat, Amat, ml, options); setup_time = AZ_second() - start_time; xxx = (double *) malloc( leng*sizeof(double)); rhs=(double *)malloc(leng*sizeof(double)); for (iii = 0; iii < leng; iii++) xxx[iii] = 0.0; /* Set rhs */ fp = fopen("AZ_capture_rhs.mat","r"); if (fp == NULL) { if (proc_config[AZ_node] == 0) printf("taking random vector for rhs\n"); AZ_random_vector(rhs, data_org, proc_config); AZ_reorder_vec(rhs, data_org, update_index, NULL); } else { fclose(fp); ivec =(int *)malloc((leng+1)*sizeof(int)); AZ_input_msr_matrix("AZ_capture_rhs.mat", update, &rhs, &ivec, N_update, proc_config); free(ivec); AZ_reorder_vec(rhs, data_org, update_index, NULL); } /* Set x */ fp = fopen("AZ_capture_init_guess.mat","r"); if (fp != NULL) { fclose(fp); ivec =(int *)malloc((leng+1)*sizeof(int)); AZ_input_msr_matrix("AZ_capture_init_guess.mat",update, &xxx, &ivec, N_update, proc_config); free(ivec); AZ_reorder_vec(xxx, data_org, update_index, NULL); } /* if Dirichlet BC ... 
put the answer in */ for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) { if ( (val[i] > .99999999) && (val[i] < 1.0000001)) xxx[i] = rhs[i]; } fp = fopen("AZ_no_multilevel.dat","r"); scaling = AZ_scaling_create(); start_time = AZ_second(); if (fp != NULL) { fclose(fp); options[AZ_precond] = AZ_none; options[AZ_scaling] = AZ_sym_diag; options[AZ_ignore_scaling] = AZ_TRUE; options[AZ_keep_info] = 1; AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); /* options[AZ_pre_calc] = AZ_reuse; options[AZ_conv] = AZ_expected_values; if (proc_config[AZ_node] == 0) printf("\n-------- Second solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); if (proc_config[AZ_node] == 0) printf("\n-------- Third solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); */ } else { options[AZ_keep_info] = 1; AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); options[AZ_pre_calc] = AZ_reuse; options[AZ_conv] = AZ_expected_values; /* if (proc_config[AZ_node] == 0) printf("\n-------- Second solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); if (proc_config[AZ_node] == 0) printf("\n-------- Third solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); */ } solve_time = AZ_second() - start_time; if (proc_config[AZ_node] == 0) printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time); ML_Aggregate_Destroy(&ag); ML_Destroy(&ml); AZ_free((void *) Amat->data_org); AZ_free((void *) Amat->val); AZ_free((void *) Amat->bindx); AZ_free((void *) update); AZ_free((void *) external); AZ_free((void *) extern_index); AZ_free((void *) update_index); AZ_scaling_destroy(&scaling); if (Amat != NULL) AZ_matrix_destroy(&Amat); if (Pmat != NULL) AZ_precond_destroy(&Pmat); free(xxx); free(rhs); #ifdef ML_MPI MPI_Finalize(); #endif return 0; }
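/* The block comment in the example above gives recipes for smoothed
   aggregation (SA), non-smoothed aggregation (NSA/NSR) and energy-minimizing
   (Emin) transfer operators.  A minimal sketch of two of those recipes,
   collected into helper functions (the function names are illustrative; the
   settings are the ones quoted in that comment). */
static void configure_sa(ML *ml, ML_Aggregate *ag)
{
  /* classical smoothed aggregation: damped prolongator smoothing with the
     spectral radius estimated by the power method */
  ML_Set_SpectralNormScheme_PowerMethod(ml);
  ML_Aggregate_Set_DampingFactor(ag, 4./3.);
}

static void configure_emin(ML_Aggregate *ag)
{
  /* energy-minimizing prolongator (the variant actually used above) */
  ag->minimizing_energy       = 2;
  ag->keep_agg_information    = 1;
  ag->cheap_minimizing_energy = 0;
  ag->block_scaled_SA         = 1;
}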
int main(int argc, char *argv[]) { int num_PDE_eqns=3, N_levels=3, nsmooth=1; int leng, level, N_grid_pts, coarsest_level; /* See Aztec User's Guide for more information on the */ /* variables that follow. */ int proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE]; double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE]; /* data structure for matrix corresponding to the fine grid */ int *data_org = NULL, *update = NULL, *external = NULL; int *update_index = NULL, *extern_index = NULL; int *cpntr = NULL; int *bindx = NULL, N_update, iii; double *val = NULL; double *xxx, *rhs; AZ_MATRIX *Amat; AZ_PRECOND *Pmat = NULL; ML *ml; FILE *fp; int ch,i,j, Nrigid, *garbage; struct AZ_SCALING *scaling; double solve_time, setup_time, start_time, *mode, *rigid; ML_Aggregate *ag; int nblocks, *blocks; char filename[80]; double alpha; int one = 1; #ifdef ML_MPI MPI_Init(&argc,&argv); /* get number of processors and the name of this processor */ AZ_set_proc_config(proc_config, MPI_COMM_WORLD); #else AZ_set_proc_config(proc_config, AZ_NOT_MPI); #endif leng = 0; if (proc_config[AZ_node] == 0) { #ifdef binary fp=fopen(".data","rb"); #else fp=fopen(".data","r"); #endif if (fp==NULL) { printf("couldn't open file .data\n"); exit(1); } #ifdef binary fread(&leng, sizeof(int), 1, fp); #else fscanf(fp,"%d",&leng); #endif fclose(fp); } leng = AZ_gsum_int(leng, proc_config); N_grid_pts=leng/num_PDE_eqns; /* initialize the list of global indices. NOTE: the list of global */ /* indices must be in ascending order so that subsequent calls to */ /* AZ_find_index() will function properly. */ AZ_read_update(&N_update, &update, proc_config, N_grid_pts, num_PDE_eqns, AZ_linear); AZ_read_msr_matrix(update, &val, &bindx, N_update, proc_config); AZ_transform(proc_config, &external, bindx, val, update, &update_index, &extern_index, &data_org, N_update, 0, 0, 0, &cpntr, AZ_MSR_MATRIX); Amat = AZ_matrix_create( leng ); AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL); Amat->matrix_type = data_org[AZ_matrix_type]; data_org[AZ_N_rows] = data_org[AZ_N_internal] + data_org[AZ_N_border]; start_time = AZ_second(); AZ_defaults(options, params); /* scaling = AZ_scaling_create(); xxx = (double *) calloc( leng,sizeof(double)); rhs=(double *)calloc(leng,sizeof(double)); options[AZ_scaling] = AZ_sym_diag; options[AZ_precond] = AZ_none; options[AZ_max_iter] = 30; options[AZ_keep_info] = 1; AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); don't forget vector rescaling ... 
free(xxx); free(rhs); */ options[AZ_scaling] = AZ_none; ML_Create(&ml, N_levels); /* set up discretization matrix and matrix vector function */ AZ_ML_Set_Amat(ml, N_levels-1, N_update, N_update, Amat, proc_config); ML_Aggregate_Create( &ag ); Nrigid = 0; if (proc_config[AZ_node] == 0) { sprintf(filename,"rigid_body_mode%d",Nrigid+1); while( (fp = fopen(filename,"r")) != NULL) { fclose(fp); Nrigid++; sprintf(filename,"rigid_body_mode%d",Nrigid+1); } } Nrigid = AZ_gsum_int(Nrigid,proc_config); if (Nrigid != 0) { rigid = (double *) ML_allocate( sizeof(double)*Nrigid*(N_update+1) ); if (rigid == NULL) { printf("Error: Not enough space for rigid body modes\n"); } } rhs=(double *)malloc(leng*sizeof(double)); AZ_random_vector(rhs, data_org, proc_config); for (i = 0; i < Nrigid; i++) { sprintf(filename,"rigid_body_mode%d",i+1); AZ_input_msr_matrix(filename, update, &mode, &garbage, N_update, proc_config); /* AZ_sym_rescale_sl(mode, Amat->data_org, options, proc_config, scaling); */ /* Amat->matvec(mode, rigid, Amat, proc_config); for (j = 0; j < N_update; j++) printf("this is %d %e\n",j,rigid[j]); */ for (j = 0; j < i; j++) { alpha = -AZ_gdot(N_update, mode, &(rigid[j*N_update]), proc_config)/AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]), proc_config); daxpy_(&N_update, &alpha, &(rigid[j*N_update]), &one, mode, &one); printf("alpha1 is %e\n",alpha); } alpha = -AZ_gdot(N_update, mode, rhs, proc_config)/AZ_gdot(N_update, mode, mode, proc_config); printf("alpha2 is %e\n",alpha); daxpy_(&N_update, &alpha, mode, &one, rhs, &one); for (j = 0; j < N_update; j++) rigid[i*N_update+j] = mode[j]; free(mode); free(garbage); } for (j = 0; j < Nrigid; j++) { alpha = -AZ_gdot(N_update, rhs, &(rigid[j*N_update]), proc_config)/AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]), proc_config); daxpy_(&N_update, &alpha, &(rigid[j*N_update]), &one, rhs, &one); printf("alpha4 is %e\n",alpha); } for (i = 0; i < Nrigid; i++) { alpha = -AZ_gdot(N_update, &(rigid[i*N_update]), rhs, proc_config); printf("alpha is %e\n",alpha); } if (Nrigid != 0) { ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, Nrigid, rigid, N_update); /* free(rigid); */ } coarsest_level = ML_Gen_MGHierarchy_UsingAggregation(ml, N_levels-1, ML_DECREASING, ag); coarsest_level = N_levels - coarsest_level; /* ML_Operator_Print(&(ml->Pmat[N_levels-2]), "Pmat"); exit(1); */ if ( proc_config[AZ_node] == 0 ) printf("Coarse level = %d \n", coarsest_level); /* set up smoothers */ for (level = N_levels-1; level > coarsest_level; level--) { j = 10; if (level == N_levels-1) j = 10; options[AZ_solver] = AZ_cg; options[AZ_precond]=AZ_sym_GS; options[AZ_subdomain_solve]=AZ_icc; /* options[AZ_precond] = AZ_none; */ options[AZ_poly_ord] = 5; ML_Gen_SmootherAztec(ml, level, options, params, proc_config, status, j, ML_PRESMOOTHER,NULL); ML_Gen_SmootherAztec(ml, level, options, params, proc_config, status, j, ML_POSTSMOOTHER,NULL); /* ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth,1.0); ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth,1.0); */ /* nblocks = ML_Aggregate_Get_AggrCount( ag, level ); ML_Aggregate_Get_AggrMap( ag, level, &blocks); ML_Gen_Smoother_VBlockSymGaussSeidel( ml , level, ML_BOTH, nsmooth, 1.0, nblocks, blocks); ML_Gen_Smoother_VBlockSymGaussSeidel( ml , level, ML_POSTSMOOTHER, nsmooth, 1.0, nblocks, blocks); */ /* ML_Gen_Smoother_VBlockJacobi( ml , level, ML_PRESMOOTHER, nsmooth, .5, nblocks, blocks); ML_Gen_Smoother_VBlockJacobi( ml , level, ML_POSTSMOOTHER, nsmooth,.5, nblocks, blocks); */ /* 
ML_Gen_Smoother_GaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth); ML_Gen_Smoother_GaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth); */ /* need to change this when num_pdes is different on different levels */ /* if (level == N_levels-1) { ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth, 0.5, num_PDE_eqns); ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth, 0.5, num_PDE_eqns); } else { ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth, 0.5, 2*num_PDE_eqns); ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth, 0.5, 2*num_PDE_eqns); } */ /* */ /* ML_Gen_SmootherJacobi(ml , level, ML_PRESMOOTHER, nsmooth, .67); ML_Gen_SmootherJacobi(ml , level, ML_POSTSMOOTHER, nsmooth, .67 ); */ } /* ML_Gen_CoarseSolverSuperLU( ml, coarsest_level); */ /* ML_Gen_SmootherSymGaussSeidel(ml , coarsest_level, ML_PRESMOOTHER, 2*nsmooth,1.); */ /* ML_Gen_SmootherBlockGaussSeidel(ml , level, ML_PRESMOOTHER, 50*nsmooth, 1.0, 2*num_PDE_eqns); */ ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_PRESMOOTHER, 2*nsmooth, 1.0, num_PDE_eqns); ML_Gen_Solver(ml, ML_MGV, N_levels-1, coarsest_level); AZ_defaults(options, params); options[AZ_solver] = AZ_GMRESR; options[AZ_scaling] = AZ_none; options[AZ_precond] = AZ_user_precond; options[AZ_conv] = AZ_rhs; options[AZ_output] = 1; options[AZ_max_iter] = 1500; options[AZ_poly_ord] = 5; options[AZ_kspace] = 130; params[AZ_tol] = 1.0e-8; AZ_set_ML_preconditioner(&Pmat, Amat, ml, options); setup_time = AZ_second() - start_time; xxx = (double *) malloc( leng*sizeof(double)); /* Set rhs */ fp = fopen("AZ_capture_rhs.dat","r"); if (fp == NULL) { if (proc_config[AZ_node] == 0) printf("taking random vector for rhs\n"); /* AZ_random_vector(rhs, data_org, proc_config); AZ_reorder_vec(rhs, data_org, update_index, NULL); AZ_random_vector(xxx, data_org, proc_config); AZ_reorder_vec(xxx, data_org, update_index, NULL); Amat->matvec(xxx, rhs, Amat, proc_config); */ } else { ch = getc(fp); if (ch == 'S') { while ( (ch = getc(fp)) != '\n') ; } else ungetc(ch,fp); for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) fscanf(fp,"%lf",&(rhs[i])); fclose(fp); } for (iii = 0; iii < leng; iii++) xxx[iii] = 0.0; /* Set x */ fp = fopen("AZ_capture_init_guess.dat","r"); if (fp != NULL) { ch = getc(fp); if (ch == 'S') { while ( (ch = getc(fp)) != '\n') ; } else ungetc(ch,fp); for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) fscanf(fp,"%lf",&(xxx[i])); fclose(fp); options[AZ_conv] = AZ_expected_values; } /* if Dirichlet BC ... 
put the answer in */ for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) { if ( (val[i] > .99999999) && (val[i] < 1.0000001)) xxx[i] = rhs[i]; } fp = fopen("AZ_no_multilevel.dat","r"); scaling = AZ_scaling_create(); start_time = AZ_second(); if (fp != NULL) { fclose(fp); options[AZ_precond] = AZ_none; options[AZ_scaling] = AZ_sym_diag; options[AZ_ignore_scaling] = AZ_TRUE; options[AZ_keep_info] = 1; AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); /* options[AZ_pre_calc] = AZ_reuse; options[AZ_conv] = AZ_expected_values; if (proc_config[AZ_node] == 0) printf("\n-------- Second solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); if (proc_config[AZ_node] == 0) printf("\n-------- Third solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); */ } else { options[AZ_keep_info] = 1; /* options[AZ_max_iter] = 40; */ AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); for (j = 0; j < Nrigid; j++) { alpha = -AZ_gdot(N_update, xxx, &(rigid[j*N_update]), proc_config)/AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]), proc_config); daxpy_(&N_update, &alpha, &(rigid[j*N_update]), &one, xxx, &one); printf("alpha5 is %e\n",alpha); } AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); options[AZ_pre_calc] = AZ_reuse; options[AZ_conv] = AZ_expected_values; /* if (proc_config[AZ_node] == 0) printf("\n-------- Second solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); if (proc_config[AZ_node] == 0) printf("\n-------- Third solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); */ } solve_time = AZ_second() - start_time; if (proc_config[AZ_node] == 0) printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time); ML_Aggregate_Destroy(&ag); ML_Destroy(&ml); AZ_free((void *) Amat->data_org); AZ_free((void *) Amat->val); AZ_free((void *) Amat->bindx); AZ_free((void *) update); AZ_free((void *) external); AZ_free((void *) extern_index); AZ_free((void *) update_index); if (Amat != NULL) AZ_matrix_destroy(&Amat); if (Pmat != NULL) AZ_precond_destroy(&Pmat); free(xxx); free(rhs); #ifdef ML_MPI MPI_Finalize(); #endif return 0; }
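/* The smoother loop in the example above runs an Aztec solve (CG with a
   symmetric Gauss-Seidel preconditioner, 10 iterations) as the pre- and
   post-smoother on every level via ML_Gen_SmootherAztec.  A minimal sketch
   of the other variant mentioned in the commented-out code of the preceding
   example: apply only the Aztec domain-decomposition/ILUT preconditioner
   (no Krylov iterations) as the smoother.  The values are the ones quoted in
   that comment; options, params, proc_config and status are assumed to be
   set up as in the examples, and the function name is hypothetical. */
static void set_aztec_ilut_smoother(ML *ml, int level, int *options,
                                    double *params, int *proc_config,
                                    double *status)
{
  options[AZ_precond]         = AZ_dom_decomp;
  options[AZ_subdomain_solve] = AZ_ilut;
  params[AZ_ilut_fill]        = 1.0;
  options[AZ_reorder]         = 1;
  ML_Gen_SmootherAztec(ml, level, options, params, proc_config, status,
                       AZ_ONLY_PRECONDITIONER, ML_PRESMOOTHER, NULL);
}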
int main(int argc, char *argv[]) { char global[]="global"; char local[]="local"; int proc_config[AZ_PROC_SIZE];/* Processor information. */ int options[AZ_OPTIONS_SIZE]; /* Array used to select solver options. */ double params[AZ_PARAMS_SIZE]; /* User selected solver paramters. */ int *data_org; /* Array to specify data layout */ double status[AZ_STATUS_SIZE]; /* Information returned from AZ_solve(). */ int *update; /* vector elements updated on this node. */ int *external; /* vector elements needed by this node. */ int *update_index; /* ordering of update[] and external[] */ int *extern_index; /* locally on this processor. */ int *indx; /* MSR format of real and imag parts */ int *bindx; int *bpntr; int *rpntr; int *cpntr; AZ_MATRIX *Amat; AZ_PRECOND *Prec; double *val; double *x, *b, *xexact, *xsolve; int n_nonzeros, n_blk_nonzeros; int N_update; /* # of block unknowns updated on this node */ int N_local; /* Number scalar equations on this node */ int N_global, N_blk_global; /* Total number of equations */ int N_external, N_blk_eqns; double *val_msr; int *bindx_msr; double norm, d ; int matrix_type; int has_global_indices, option; int i, j, m, mp ; int ione = 1; #ifdef TEST_SINGULAR double * xnull; /* will contain difference of given exact solution and computed solution*/ double * Axnull; /* Product of A time xnull */ double norm_Axnull; #endif #ifdef AZTEC_MPI double MPI_Wtime(void) ; #endif double time ; #ifdef AZTEC_MPI MPI_Init(&argc,&argv); #endif /* get number of processors and the name of this processor */ #ifdef AZTEC_MPI AZ_set_proc_config(proc_config,MPI_COMM_WORLD); #else AZ_set_proc_config(proc_config,0); #endif printf("proc %d of %d is alive\n", proc_config[AZ_node],proc_config[AZ_N_procs]) ; #ifdef AZTEC_MPI MPI_Barrier(MPI_COMM_WORLD) ; #endif #ifdef VBRMATRIX if(argc != 3) perror("error: enter name of data and partition file on command line") ; #else if(argc != 2) perror("error: enter name of data file on command line") ; #endif /* Set exact solution to NULL */ xexact = NULL; /* Read matrix file and distribute among processors. 
Returns with this processor's set of rows */ #ifdef VBRMATRIX read_hb(argv[1], proc_config, &N_global, &n_nonzeros, &val_msr, &bindx_msr, &x, &b, &xexact); create_vbr(argv[2], proc_config, &N_global, &N_blk_global, &n_nonzeros, &n_blk_nonzeros, &N_update, &update, bindx_msr, val_msr, &val, &indx, &rpntr, &cpntr, &bpntr, &bindx); if(proc_config[AZ_node] == 0) { free ((void *) val_msr); free ((void *) bindx_msr); free ((void *) cpntr); } matrix_type = AZ_VBR_MATRIX; #ifdef AZTEC_MPI MPI_Barrier(MPI_COMM_WORLD) ; #endif distrib_vbr_matrix( proc_config, N_global, N_blk_global, &n_nonzeros, &n_blk_nonzeros, &N_update, &update, &val, &indx, &rpntr, &cpntr, &bpntr, &bindx, &x, &b, &xexact); #else read_hb(argv[1], proc_config, &N_global, &n_nonzeros, &val, &bindx, &x, &b, &xexact); #ifdef AZTEC_MPI MPI_Barrier(MPI_COMM_WORLD) ; #endif distrib_msr_matrix(proc_config, N_global, &n_nonzeros, &N_update, &update, &val, &bindx, &x, &b, &xexact); #ifdef DEBUG for (i = 0; i<N_update; i++) if (val[i] == 0.0 ) printf("Zero diagonal at row %d\n",i); #endif matrix_type = AZ_MSR_MATRIX; #endif /* convert matrix to a local distributed matrix */ cpntr = NULL; AZ_transform(proc_config, &external, bindx, val, update, &update_index, &extern_index, &data_org, N_update, indx, bpntr, rpntr, &cpntr, matrix_type); printf("Processor %d: Completed AZ_transform\n",proc_config[AZ_node]) ; has_global_indices = 0; option = AZ_LOCAL; #ifdef VBRMATRIX N_local = rpntr[N_update]; #else N_local = N_update; #endif Amat = AZ_matrix_create(N_local); #ifdef VBRMATRIX AZ_set_VBR(Amat, rpntr, cpntr, bpntr, indx, bindx, val, data_org, N_update, update, option); #else AZ_set_MSR(Amat, bindx, val, data_org, N_update, update, option); #endif printf("proc %d Completed AZ_create_matrix\n",proc_config[AZ_node]) ; #ifdef AZTEC_MPI MPI_Barrier(MPI_COMM_WORLD) ; #endif /* initialize AZTEC options */ AZ_defaults(options, params); options[AZ_solver] = AZ_gmres; options[AZ_precond] = AZ_sym_GS; options[AZ_poly_ord] = 1; options[AZ_graph_fill] = 1; params[AZ_rthresh] = 0.0E-7; params[AZ_athresh] = 0.0E-7; options[AZ_overlap] = 1; /* params[AZ_ilut_fill] = 2.0; params[AZ_drop] = 0.01; options[AZ_overlap] = 0; options[AZ_reorder] = 0; params[AZ_rthresh] = 1.0E-1; params[AZ_athresh] = 1.0E-1; options[AZ_precond] = AZ_dom_decomp ; options[AZ_subdomain_solve] = AZ_bilu_ifp; options[AZ_reorder] = 0; options[AZ_graph_fill] = 0; params[AZ_rthresh] = 1.0E-7; params[AZ_athresh] = 1.0E-7; options[AZ_poly_ord] = 1; options[AZ_precond] = AZ_Jacobi; params[AZ_omega] = 1.0; options[AZ_precond] = AZ_none ; options[AZ_poly_ord] = 1; options[AZ_precond] = AZ_Jacobi ; options[AZ_scaling] = AZ_sym_row_sum ; options[AZ_scaling] = AZ_sym_diag; options[AZ_conv] = AZ_noscaled; options[AZ_scaling] = AZ_Jacobi ; options[AZ_precond] = AZ_dom_decomp ; options[AZ_subdomain_solve] = AZ_icc ; options[AZ_subdomain_solve] = AZ_ilut ; params[AZ_omega] = 1.2; params[AZ_ilut_fill] = 2.0; params[AZ_drop] = 0.01; options[AZ_reorder] = 0; options[AZ_overlap] = 0; options[AZ_type_overlap] = AZ_symmetric; options[AZ_precond] = AZ_dom_decomp ; options[AZ_subdomain_solve] = AZ_bilu ; options[AZ_graph_fill] = 0; options[AZ_overlap] = 0; options[AZ_precond] = AZ_dom_decomp ; options[AZ_subdomain_solve] = AZ_bilu_ifp ; options[AZ_graph_fill] = 0; options[AZ_overlap] = 0; params[AZ_rthresh] = 1.0E-3; params[AZ_athresh] = 1.0E-3; options[AZ_poly_ord] = 1; options[AZ_precond] = AZ_Jacobi ; */ options[AZ_kspace] = 600 ; options[AZ_max_iter] = 600 ; params[AZ_tol] = 1.0e-14; #ifdef BGMRES 
options[AZ_gmres_blocksize] = 3; options[AZ_gmres_num_rhs] = 1; #endif #ifdef DEBUG if (proc_config[AZ_N_procs]==1) write_vec("rhs.dat", N_local, b); #endif /* xsolve is a slightly longer vector, needed to account for external entries. Make it and copy x (initial guess) into it. */ if (has_global_indices) { N_external = 0; } else { N_external = data_org[AZ_N_external]; } xsolve = (double *) calloc(N_local + N_external, sizeof(double)) ; for (i=0; i<N_local; i++) xsolve[i] = x[i]; /* Reorder rhs and xsolve to match matrix ordering from AZ_transform */ if (!has_global_indices) { AZ_reorder_vec(b, data_org, update_index, rpntr) ; AZ_reorder_vec(xsolve, data_org, update_index, rpntr) ; } #ifdef VBRMATRIX AZ_check_vbr(N_update, data_org[AZ_N_ext_blk], AZ_LOCAL, bindx, bpntr, cpntr, rpntr, proc_config); #else AZ_check_msr(bindx, N_update, N_external, AZ_LOCAL, proc_config); #endif printf("Processor %d of %d N_local = %d N_external = %d NNZ = %d\n", proc_config[AZ_node],proc_config[AZ_N_procs],N_local,N_external, n_nonzeros); /* solve the system of equations using b as the right hand side */ Prec = AZ_precond_create(Amat,AZ_precondition, NULL); AZ_iterate(xsolve, b, options, params, status, proc_config, Amat, Prec, NULL); /*AZ_ifpack_iterate(xsolve, b, options, params, status, proc_config, Amat);*/ if (proc_config[AZ_node]==0) { printf("True residual norm = %22.16g\n",status[AZ_r]); printf("True scaled res = %22.16g\n",status[AZ_scaled_r]); printf("Computed res norm = %22.16g\n",status[AZ_rec_r]); } #ifdef TEST_SINGULAR xnull = (double *) calloc(N_local + N_external, sizeof(double)) ; Axnull = (double *) calloc(N_local + N_external, sizeof(double)) ; for (i=0; i<N_local; i++) xnull[i] = xexact[i]; if (!has_global_indices) AZ_reorder_vec(xnull, data_org, update_index, rpntr); for (i=0; i<N_local; i++) xnull[i] -= xsolve[i]; /* fill xnull with the difference xexact - xsolve */ Amat->matvec(xnull, Axnull, Amat, proc_config); norm_Axnull = AZ_gvector_norm(N_local, 2, Axnull, proc_config); if (proc_config[AZ_node]==0) printf("Norm of A(xexact-xsolve) = %12.4g\n",norm_Axnull); free((void *) xnull); free((void *) Axnull); #endif /* Get solution back into original ordering */ if (!has_global_indices) { AZ_invorder_vec(xsolve, data_org, update_index, rpntr, x); free((void *) xsolve); } else { free((void *) x); x = xsolve; } #ifdef DEBUG if (proc_config[AZ_N_procs]==1) write_vec("solution.dat", N_local, x); #endif if (xexact != NULL) { double sum = 0.0; double largest = 0.0; for (i=0; i<N_local; i++) sum += fabs(x[i]-xexact[i]); printf("Processor %d: Difference between exact and computed solution = %12.4g\n", proc_config[AZ_node],sum); for (i=0; i<N_local; i++) largest = AZ_MAX(largest,fabs(xexact[i])); printf("Processor %d: Difference divided by max abs value of exact = %12.4g\n", proc_config[AZ_node],sum/largest); } free((void *) val); free((void *) bindx); #ifdef VBRMATRIX free((void *) rpntr); free((void *) bpntr); free((void *) indx); #endif free((void *) b); free((void *) x); if (xexact!=NULL) free((void *) xexact); AZ_free((void *) update); AZ_free((void *) update_index); AZ_free((void *) external); AZ_free((void *) extern_index); AZ_free((void *) data_org); if (cpntr!=NULL) AZ_free((void *) cpntr); AZ_precond_destroy(&Prec); AZ_matrix_destroy(&Amat); #ifdef AZTEC_MPI MPI_Finalize() ; #endif /* end main */ return 0 ; }
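/* The example above funnels a Harwell-Boeing matrix through AZ_transform, reorders the
   vectors to match, solves with AZ_iterate, and maps the answer back with AZ_invorder_vec.
   The routine below is a minimal single-process sketch of that same pattern on a 1-D
   Laplacian built directly in MSR form; it is illustrative only (the function name and the
   option choices are not part of the example above) and assumes the standard az_aztec.h
   interface used throughout these examples. */

#include <stdlib.h>
#include "az_aztec.h"

int solve_1d_laplacian(int N, int *proc_config)
{
  int    options[AZ_OPTIONS_SIZE];
  double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE];
  int    *update, *external, *update_index, *extern_index, *data_org;
  int    *bindx, *cpntr = NULL;
  double *val, *x, *b;
  int    i, nz_ptr;
  AZ_MATRIX  *Amat;
  AZ_PRECOND *Prec;

  /* This process owns all global rows 0..N-1 (single-process sketch). */
  update = (int *) malloc(N*sizeof(int));
  for (i = 0; i < N; i++) update[i] = i;

  /* MSR storage: val[0..N-1] holds the diagonal, bindx[0..N] the row pointers,
     and bindx/val[N+1..] the off-diagonal column ids and values. */
  bindx = (int *)    malloc((3*N+1)*sizeof(int));
  val   = (double *) malloc((3*N+1)*sizeof(double));
  nz_ptr = N+1;
  bindx[0] = nz_ptr;
  for (i = 0; i < N; i++) {
    val[i] = 2.0;                                     /* diagonal entry */
    if (i > 0)   { bindx[nz_ptr] = i-1; val[nz_ptr++] = -1.0; }
    if (i < N-1) { bindx[nz_ptr] = i+1; val[nz_ptr++] = -1.0; }
    bindx[i+1] = nz_ptr;                              /* end of row i   */
  }

  /* Convert global indices to Aztec's local ordering (indx/bpntr/rpntr are unused for MSR). */
  AZ_transform(proc_config, &external, bindx, val, update, &update_index,
               &extern_index, &data_org, N, NULL, NULL, NULL, &cpntr,
               AZ_MSR_MATRIX);

  Amat = AZ_matrix_create(N);
  AZ_set_MSR(Amat, bindx, val, data_org, N, update, AZ_LOCAL);
  Prec = AZ_precond_create(Amat, AZ_precondition, NULL);

  x = (double *) calloc(N + data_org[AZ_N_external], sizeof(double));
  b = (double *) calloc(N + data_org[AZ_N_external], sizeof(double));
  for (i = 0; i < N; i++) b[i] = 1.0;

  /* The right-hand side must match the ordering produced by AZ_transform. */
  AZ_reorder_vec(b, data_org, update_index, NULL);

  AZ_defaults(options, params);
  options[AZ_solver]  = AZ_cg;
  options[AZ_precond] = AZ_Jacobi;
  params[AZ_tol]      = 1.0e-10;
  AZ_iterate(x, b, options, params, status, proc_config, Amat, Prec, NULL);

  /* Copy the solution back to the original ordering, reusing b as output storage. */
  AZ_invorder_vec(x, data_org, update_index, NULL, b);

  AZ_precond_destroy(&Prec);
  AZ_matrix_destroy(&Amat);
  free(x); free(b); free(update); free(bindx); free(val);
  AZ_free((void *) update_index); AZ_free((void *) external);
  AZ_free((void *) extern_index); AZ_free((void *) data_org);
  if (cpntr != NULL) AZ_free((void *) cpntr);
  return 0;
}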
int main(int argc, char *argv[]) { int num_PDE_eqns=6, N_levels=4, nsmooth=2; int leng, level, N_grid_pts, coarsest_level; /* See Aztec User's Guide for more information on the */ /* variables that follow. */ int proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE]; double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE]; /* data structure for matrix corresponding to the fine grid */ double *val = NULL, *xxx, *rhs, solve_time, setup_time, start_time; AZ_MATRIX *Amat; AZ_PRECOND *Pmat = NULL; ML *ml; FILE *fp; int i, j, Nrigid, *garbage = NULL; #ifdef ML_partition int nblocks; int *block_list = NULL; int k; #endif struct AZ_SCALING *scaling; ML_Aggregate *ag; double *mode, *rigid; char filename[80]; double alpha; int allocated = 0; int old_prec, old_sol; double old_tol; /* double *Amode, beta, biggest; int big_ind = -1, ii; */ ML_Operator *Amatrix; int *rowi_col = NULL, rowi_N, count2, ccc; double *rowi_val = NULL; double max_diag, min_diag, max_sum, sum; int nBlocks, *blockIndices, Ndof; #ifdef ML_partition FILE *fp2; int count; if (argc != 2) { printf("Usage: ml_read_elas num_processors\n"); exit(1); } else sscanf(argv[1],"%d",&nblocks); #endif #ifdef HAVE_MPI MPI_Init(&argc,&argv); /* get number of processors and the name of this processor */ AZ_set_proc_config(proc_config, MPI_COMM_WORLD); #else AZ_set_proc_config(proc_config, AZ_NOT_MPI); #endif /* read in the number of matrix equations */ leng = 0; if (proc_config[AZ_node] == 0) { # ifdef binary fp=fopen(".data","rb"); # else fp=fopen(".data","r"); # endif if (fp==NULL) { printf("couldn't open file .data\n"); exit(1); } # ifdef binary fread(&leng, sizeof(int), 1, fp); # else fscanf(fp,"%d",&leng); # endif fclose(fp); } leng = AZ_gsum_int(leng, proc_config); N_grid_pts=leng/num_PDE_eqns; /* initialize the list of global indices. NOTE: the list of global */ /* indices must be in ascending order so that subsequent calls to */ /* AZ_find_index() will function properly. */ if (proc_config[AZ_N_procs] == 1) i = AZ_linear; else i = AZ_file; AZ_read_update(&N_update, &update, proc_config, N_grid_pts, num_PDE_eqns,i); AZ_read_msr_matrix(update, &val, &bindx, N_update, proc_config); /* This code is to fix things up so that we are sure we have */ /* all block (including the ghost nodes the same size. */ AZ_block_MSR(&bindx, &val, N_update, num_PDE_eqns, update); AZ_transform_norowreordering(proc_config, &external, bindx, val, update, &update_index, &extern_index, &data_org, N_update, 0, 0, 0, &cpntr, AZ_MSR_MATRIX); Amat = AZ_matrix_create( leng ); AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL); Amat->matrix_type = data_org[AZ_matrix_type]; data_org[AZ_N_rows] = data_org[AZ_N_internal] + data_org[AZ_N_border]; #ifdef SCALE_ME ML_MSR_sym_diagonal_scaling(Amat, proc_config, &scaling_vect); #endif start_time = AZ_second(); options[AZ_scaling] = AZ_none; ML_Create(&ml, N_levels); ML_Set_PrintLevel(10); /* set up discretization matrix and matrix vector function */ AZ_ML_Set_Amat(ml, N_levels-1, N_update, N_update, Amat, proc_config); #ifdef ML_partition /* this code is meant to partition the matrices so that things can be */ /* run in parallel later. */ /* It is meant to be run on only one processor. 
*/ #ifdef MB_MODIF fp2 = fopen(".update","w"); #else fp2 = fopen("partition_file","w"); #endif ML_Operator_AmalgamateAndDropWeak(&(ml->Amat[N_levels-1]), num_PDE_eqns, 0.0); ML_Gen_Blocks_Metis(ml, N_levels-1, &nblocks, &block_list); for (i = 0; i < nblocks; i++) { count = 0; for (j = 0; j < ml->Amat[N_levels-1].outvec_leng; j++) { if (block_list[j] == i) count++; } fprintf(fp2," %d\n",count*num_PDE_eqns); for (j = 0; j < ml->Amat[N_levels-1].outvec_leng; j++) { if (block_list[j] == i) { for (k = 0; k < num_PDE_eqns; k++) fprintf(fp2,"%d\n",j*num_PDE_eqns+k); } } } fclose(fp2); ML_Operator_UnAmalgamateAndDropWeak(&(ml->Amat[N_levels-1]),num_PDE_eqns,0.0); #ifdef MB_MODIF printf(" partition file dumped in .update\n"); #endif exit(1); #endif ML_Aggregate_Create( &ag ); /* ML_Aggregate_Set_CoarsenScheme_MIS(ag); */ #ifdef MB_MODIF ML_Aggregate_Set_DampingFactor(ag,1.50); #else ML_Aggregate_Set_DampingFactor(ag,1.5); #endif ML_Aggregate_Set_CoarsenScheme_METIS(ag); ML_Aggregate_Set_NodesPerAggr( ml, ag, -1, 35); /* ML_Aggregate_Set_Phase3AggregateCreationAggressiveness(ag, 10.001); */ ML_Aggregate_Set_Threshold(ag, 0.0); ML_Aggregate_Set_MaxCoarseSize( ag, 300); /* read in the rigid body modes */ Nrigid = 0; /* to ensure compatibility with RBM dumping software */ if (proc_config[AZ_node] == 0) { sprintf(filename,"rigid_body_mode%02d",Nrigid+1); while( (fp = fopen(filename,"r")) != NULL) { which_filename = 1; fclose(fp); Nrigid++; sprintf(filename,"rigid_body_mode%02d",Nrigid+1); } sprintf(filename,"rigid_body_mode%d",Nrigid+1); while( (fp = fopen(filename,"r")) != NULL) { fclose(fp); Nrigid++; sprintf(filename,"rigid_body_mode%d",Nrigid+1); } } Nrigid = AZ_gsum_int(Nrigid,proc_config); if (Nrigid != 0) { rigid = (double *) ML_allocate( sizeof(double)*Nrigid*(N_update+1) ); if (rigid == NULL) { printf("Error: Not enough space for rigid body modes\n"); } } rhs = (double *) malloc(leng*sizeof(double)); xxx = (double *) malloc(leng*sizeof(double)); for (iii = 0; iii < leng; iii++) xxx[iii] = 0.0; for (i = 0; i < Nrigid; i++) { if (which_filename == 1) sprintf(filename,"rigid_body_mode%02d",i+1); else sprintf(filename,"rigid_body_mode%d",i+1); AZ_input_msr_matrix(filename,update,&mode,&garbage,N_update,proc_config); AZ_reorder_vec(mode, data_org, update_index, NULL); /* here is something to stick a rigid body mode as the initial */ /* The idea is to solve A x = 0 without smoothing with a two */ /* level method. If everything is done properly, we should */ /* converge in 2 iterations. */ /* Note: we must also zero out components of the rigid body */ /* mode that correspond to Dirichlet bcs. */ if (i == -4) { for (iii = 0; iii < leng; iii++) xxx[iii] = mode[iii]; ccc = 0; Amatrix = &(ml->Amat[N_levels-1]); for (iii = 0; iii < Amatrix->outvec_leng; iii++) { ML_get_matrix_row(Amatrix,1,&iii,&allocated,&rowi_col,&rowi_val, &rowi_N, 0); count2 = 0; for (j = 0; j < rowi_N; j++) if (rowi_val[j] != 0.) count2++; if (count2 <= 1) { xxx[iii] = 0.; ccc++; } } free(rowi_col); free(rowi_val); allocated = 0; rowi_col = NULL; rowi_val = NULL; } /* * Rescale matrix/rigid body modes and checking * AZ_sym_rescale_sl(mode, Amat->data_org, options, proc_config, scaling); Amat->matvec(mode, rigid, Amat, proc_config); for (j = 0; j < N_update; j++) printf("this is %d %e\n",j,rigid[j]); */ /* Here is some code to check that the rigid body modes are */ /* really rigid body modes. The idea is to multiply by A and */ /* then to zero out things that we "think" are boundaries. 
*/ /* In this hardwired example, things near boundaries */ /* correspond to matrix rows that do not have 81 nonzeros. */ /* Amode = (double *) malloc(leng*sizeof(double)); Amat->matvec(mode, Amode, Amat, proc_config); j = 0; biggest = 0.0; for (ii = 0; ii < N_update; ii++) { if ( Amat->bindx[ii+1] - Amat->bindx[ii] != 80) { Amode[ii] = 0.; j++; } else { if ( fabs(Amode[ii]) > biggest) { biggest=fabs(Amode[ii]); big_ind = ii; } } } printf("%d entries zeroed out of %d elements\n",j,N_update); alpha = AZ_gdot(N_update, Amode, Amode, proc_config); beta = AZ_gdot(N_update, mode, mode, proc_config); printf("||A r||^2 =%e, ||r||^2 = %e, ratio = %e\n", alpha,beta,alpha/beta); printf("the biggest is %e at row %d\n",biggest,big_ind); free(Amode); */ /* orthogonalize mode with respect to previous modes. */ for (j = 0; j < i; j++) { alpha = -AZ_gdot(N_update, mode, &(rigid[j*N_update]), proc_config)/ AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]), proc_config); /* daxpy_(&N_update,&alpha,&(rigid[j*N_update]), &one, mode, &one); */ } #ifndef MB_MODIF printf(" after mb %e %e %e\n",mode[0],mode[1],mode[2]); #endif for (j = 0; j < N_update; j++) rigid[i*N_update+j] = mode[j]; free(mode); free(garbage); garbage = NULL; } if (Nrigid != 0) { ML_Aggregate_Set_BlockDiagScaling(ag); ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, Nrigid, rigid, N_update); free(rigid); } #ifdef SCALE_ME ML_Aggregate_Scale_NullSpace(ag, scaling_vect, N_update); #endif coarsest_level = ML_Gen_MGHierarchy_UsingAggregation(ml, N_levels-1, ML_DECREASING, ag); AZ_defaults(options, params); coarsest_level = N_levels - coarsest_level; if ( proc_config[AZ_node] == 0 ) printf("Coarse level = %d \n", coarsest_level); /* set up smoothers */ for (level = N_levels-1; level > coarsest_level; level--) { /* ML_Gen_Smoother_BlockGaussSeidel(ml, level,ML_BOTH, 1, 1., num_PDE_eqns); */ /* Sparse approximate inverse smoother that acutally does both */ /* pre and post smoothing. */ /* ML_Gen_Smoother_ParaSails(ml , level, ML_PRESMOOTHER, nsmooth, parasails_sym, parasails_thresh, parasails_nlevels, parasails_filter, parasails_loadbal, parasails_factorized); */ /* This is the symmetric Gauss-Seidel smoothing that we usually use. */ /* In parallel, it is not a true Gauss-Seidel in that each processor */ /* does a Gauss-Seidel on its local submatrix independent of the */ /* other processors. */ /* ML_Gen_Smoother_Cheby(ml, level, ML_BOTH, 30., nsmooth); */ Ndof = ml->Amat[level].invec_leng; ML_Gen_Blocks_Aggregates(ag, level, &nBlocks, &blockIndices); ML_Gen_Smoother_BlockDiagScaledCheby(ml, level, ML_BOTH, 30.,nsmooth, nBlocks, blockIndices); /* ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_BOTH, nsmooth,1.); */ /* This is a true Gauss Seidel in parallel. This seems to work for */ /* elasticity problems. However, I don't believe that this is very */ /* efficient in parallel. 
*/ /* nblocks = ml->Amat[level].invec_leng/num_PDE_eqns; blocks = (int *) ML_allocate(sizeof(int)*N_update); for (i =0; i < ml->Amat[level].invec_leng; i++) blocks[i] = i/num_PDE_eqns; ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml , level, ML_PRESMOOTHER, nsmooth, 1., nblocks, blocks); ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml, level, ML_POSTSMOOTHER, nsmooth, 1., nblocks, blocks); free(blocks); */ /* Block Jacobi Smoothing */ /* nblocks = ml->Amat[level].invec_leng/num_PDE_eqns; blocks = (int *) ML_allocate(sizeof(int)*N_update); for (i =0; i < ml->Amat[level].invec_leng; i++) blocks[i] = i/num_PDE_eqns; ML_Gen_Smoother_VBlockJacobi(ml , level, ML_BOTH, nsmooth, ML_ONE_STEP_CG, nblocks, blocks); free(blocks); */ /* Jacobi Smoothing */ /* ML_Gen_Smoother_Jacobi(ml , level, ML_PRESMOOTHER, nsmooth, ML_ONE_STEP_CG); ML_Gen_Smoother_Jacobi(ml , level, ML_POSTSMOOTHER, nsmooth,ML_ONE_STEP_CG); */ /* This does a block Gauss-Seidel (not true GS in parallel) */ /* where each processor has 'nblocks' blocks. */ /* nblocks = 250; ML_Gen_Blocks_Metis(ml, level, &nblocks, &blocks); ML_Gen_Smoother_VBlockJacobi(ml , level, ML_BOTH, nsmooth,ML_ONE_STEP_CG, nblocks, blocks); free(blocks); */ num_PDE_eqns = 6; } /* Choose coarse grid solver: mls, superlu, symGS, or Aztec */ /* ML_Gen_Smoother_Cheby(ml, coarsest_level, ML_BOTH, 30., nsmooth); ML_Gen_CoarseSolverSuperLU( ml, coarsest_level); */ /* ML_Gen_Smoother_SymGaussSeidel(ml , coarsest_level, ML_BOTH, nsmooth,1.); */ old_prec = options[AZ_precond]; old_sol = options[AZ_solver]; old_tol = params[AZ_tol]; params[AZ_tol] = 1.0e-9; params[AZ_tol] = 1.0e-5; options[AZ_precond] = AZ_Jacobi; options[AZ_solver] = AZ_cg; options[AZ_poly_ord] = 1; options[AZ_conv] = AZ_r0; options[AZ_orth_kvecs] = AZ_TRUE; j = AZ_gsum_int(ml->Amat[coarsest_level].outvec_leng, proc_config); options[AZ_keep_kvecs] = j - 6; options[AZ_max_iter] = options[AZ_keep_kvecs]; ML_Gen_SmootherAztec(ml, coarsest_level, options, params, proc_config, status, options[AZ_keep_kvecs], ML_PRESMOOTHER, NULL); options[AZ_conv] = AZ_noscaled; options[AZ_keep_kvecs] = 0; options[AZ_orth_kvecs] = 0; options[AZ_precond] = old_prec; options[AZ_solver] = old_sol; params[AZ_tol] = old_tol; /* */ #ifdef RST_MODIF ML_Gen_Solver(ml, ML_MGV, N_levels-1, coarsest_level); #else #ifdef MB_MODIF ML_Gen_Solver(ml, ML_SAAMG, N_levels-1, coarsest_level); #else ML_Gen_Solver(ml, ML_MGFULLV, N_levels-1, coarsest_level); #endif #endif options[AZ_solver] = AZ_GMRESR; options[AZ_solver] = AZ_cg; options[AZ_scaling] = AZ_none; options[AZ_precond] = AZ_user_precond; options[AZ_conv] = AZ_r0; options[AZ_conv] = AZ_noscaled; options[AZ_output] = 1; options[AZ_max_iter] = 500; options[AZ_poly_ord] = 5; options[AZ_kspace] = 40; params[AZ_tol] = 4.8e-6; AZ_set_ML_preconditioner(&Pmat, Amat, ml, options); setup_time = AZ_second() - start_time; /* Set rhs */ fp = fopen("AZ_capture_rhs.dat","r"); if (fp == NULL) { AZ_random_vector(rhs, data_org, proc_config); if (proc_config[AZ_node] == 0) printf("taking random vector for rhs\n"); for (i = 0; i < -N_update; i++) { rhs[i] = (double) update[i]; rhs[i] = 7.; } } else { if (proc_config[AZ_node]== 0) printf("reading rhs guess from file\n"); AZ_input_msr_matrix("AZ_capture_rhs.dat", update, &rhs, &garbage, N_update, proc_config); free(garbage); } AZ_reorder_vec(rhs, data_org, update_index, NULL); printf("changing rhs by multiplying with A\n"); Amat->matvec(rhs, xxx, Amat, proc_config); for (i = 0; i < N_update; i++) rhs[i] = xxx[i]; fp = 
fopen("AZ_capture_init_guess.dat","r"); if (fp != NULL) { fclose(fp); if (proc_config[AZ_node]== 0) printf("reading initial guess from file\n"); AZ_input_msr_matrix("AZ_capture_init_guess.dat", update, &xxx, &garbage, N_update, proc_config); free(garbage); xxx = (double *) realloc(xxx, sizeof(double)*( Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border] + Amat->data_org[AZ_N_external])); } AZ_reorder_vec(xxx, data_org, update_index, NULL); /* if Dirichlet BC ... put the answer in */ /* for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) { if ( (val[i] > .99999999) && (val[i] < 1.0000001)) xxx[i] = rhs[i]; } */ fp = fopen("AZ_no_multilevel.dat","r"); scaling = AZ_scaling_create(); start_time = AZ_second(); if (fp != NULL) { fclose(fp); options[AZ_precond] = AZ_none; options[AZ_scaling] = AZ_sym_diag; options[AZ_ignore_scaling] = AZ_TRUE; options[AZ_keep_info] = 1; AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); /* options[AZ_pre_calc] = AZ_reuse; options[AZ_conv] = AZ_expected_values; if (proc_config[AZ_node] == 0) printf("\n-------- Second solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); if (proc_config[AZ_node] == 0) printf("\n-------- Third solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); */ } else { options[AZ_keep_info] = 1; options[AZ_conv] = AZ_noscaled; options[AZ_conv] = AZ_r0; params[AZ_tol] = 1.0e-7; /* ML_Iterate(ml, xxx, rhs); */ alpha = sqrt(AZ_gdot(N_update, xxx, xxx, proc_config)); printf("init guess = %e\n",alpha); alpha = sqrt(AZ_gdot(N_update, rhs, rhs, proc_config)); printf("rhs = %e\n",alpha); #ifdef SCALE_ME ML_MSR_scalerhs(rhs, scaling_vect, data_org[AZ_N_internal] + data_org[AZ_N_border]); ML_MSR_scalesol(xxx, scaling_vect, data_org[AZ_N_internal] + data_org[AZ_N_border]); #endif max_diag = 0.; min_diag = 1.e30; max_sum = 0.; for (i = 0; i < N_update; i++) { if (Amat->val[i] < 0.) 
printf("woops negative diagonal A(%d,%d) = %e\n", i,i,Amat->val[i]); if (Amat->val[i] > max_diag) max_diag = Amat->val[i]; if (Amat->val[i] < min_diag) min_diag = Amat->val[i]; sum = fabs(Amat->val[i]); for (j = Amat->bindx[i]; j < Amat->bindx[i+1]; j++) { sum += fabs(Amat->val[j]); } if (sum > max_sum) max_sum = sum; } printf("Largest diagonal = %e, min diag = %e large abs row sum = %e\n", max_diag, min_diag, max_sum); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); options[AZ_pre_calc] = AZ_reuse; options[AZ_conv] = AZ_expected_values; /* if (proc_config[AZ_node] == 0) printf("\n-------- Second solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); if (proc_config[AZ_node] == 0) printf("\n-------- Third solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); */ } solve_time = AZ_second() - start_time; if (proc_config[AZ_node] == 0) printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time); if (proc_config[AZ_node] == 0) printf("Printing out a few entries of the solution ...\n"); for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 7) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 23) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 47) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 101) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 171) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} ML_Aggregate_Destroy(&ag); ML_Destroy(&ml); AZ_free((void *) Amat->data_org); AZ_free((void *) Amat->val); AZ_free((void *) Amat->bindx); AZ_free((void *) update); AZ_free((void *) external); AZ_free((void *) extern_index); AZ_free((void *) update_index); AZ_scaling_destroy(&scaling); if (Amat != NULL) AZ_matrix_destroy(&Amat); if (Pmat != NULL) AZ_precond_destroy(&Pmat); free(xxx); free(rhs); #ifdef HAVE_MPI MPI_Finalize(); #endif return 0; }
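/* The program above builds a smoothed-aggregation multigrid hierarchy with ML and hands it
   to Aztec as a preconditioner, with many experimental smoother choices left in comments.
   The routine below is a condensed sketch of just that core pattern; the function name,
   level counts and option values are illustrative and not taken from the example itself. */

#include "az_aztec.h"
#include "ml_include.h"

void solve_with_ml_preconditioner(AZ_MATRIX *Amat, int N_update,
                                  double *x, double *rhs, int *proc_config)
{
  int          options[AZ_OPTIONS_SIZE];
  double       params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE];
  int          N_levels = 4, nsmooth = 2, coarsest_level, level;
  ML           *ml;
  ML_Aggregate *ag;
  AZ_PRECOND   *Pmat = NULL;

  /* Attach the fine-grid Aztec matrix to the top level of the ML hierarchy. */
  ML_Create(&ml, N_levels);
  ML_Set_PrintLevel(10);
  AZ_ML_Set_Amat(ml, N_levels-1, N_update, N_update, Amat, proc_config);

  /* Smoothed-aggregation coarsening, working downward from the finest level. */
  ML_Aggregate_Create(&ag);
  ML_Aggregate_Set_MaxCoarseSize(ag, 300);
  coarsest_level = ML_Gen_MGHierarchy_UsingAggregation(ml, N_levels-1,
                                                       ML_DECREASING, ag);
  coarsest_level = N_levels - coarsest_level;

  /* Processor-local symmetric Gauss-Seidel smoothing on every level, including
     the coarsest one (a direct coarse solve could be used there instead). */
  for (level = N_levels-1; level >= coarsest_level; level--)
    ML_Gen_Smoother_SymGaussSeidel(ml, level, ML_BOTH, nsmooth, 1.0);

  ML_Gen_Solver(ml, ML_MGV, N_levels-1, coarsest_level);

  /* Use the resulting V-cycle as a user preconditioner for Aztec CG. */
  AZ_defaults(options, params);
  options[AZ_solver]   = AZ_cg;
  options[AZ_precond]  = AZ_user_precond;
  options[AZ_conv]     = AZ_r0;
  options[AZ_max_iter] = 500;
  params[AZ_tol]       = 1.0e-8;
  AZ_set_ML_preconditioner(&Pmat, Amat, ml, options);

  AZ_iterate(x, rhs, options, params, status, proc_config, Amat, Pmat, NULL);

  AZ_precond_destroy(&Pmat);
  ML_Aggregate_Destroy(&ag);
  ML_Destroy(&ml);
}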
int test_azoo_conv_with_scaling(int conv_option, int scaling_option, const Epetra_Comm& comm, bool verbose) { int localN = 20; int numprocs = comm.NumProc(); int globalN = numprocs*localN; Epetra_Map emap(globalN, 0, comm); Epetra_CrsMatrix* Acrs = create_and_fill_crs_matrix(emap); Epetra_Vector x_crs(emap), b_crs(emap); x_crs.PutScalar(1.0); Acrs->Multiply(false, x_crs, b_crs); x_crs.PutScalar(0.0); AztecOO azoo(Acrs, &x_crs, &b_crs); azoo.SetAztecOption(AZ_conv, conv_option); azoo.SetAztecOption(AZ_solver, AZ_cg); azoo.SetAztecOption(AZ_scaling, scaling_option); azoo.Iterate(100, 1.e-9); //now, do the same thing with 'old-fashioned Aztec', and compare //the solutions. int* proc_config = new int[AZ_PROC_SIZE]; #ifdef EPETRA_MPI AZ_set_proc_config(proc_config, MPI_COMM_WORLD); AZ_set_comm(proc_config, MPI_COMM_WORLD); #else AZ_set_proc_config(proc_config, 0); #endif int *external, *update_index, *external_index; int *external2, *update_index2, *external_index2; AZ_MATRIX* Amsr = NULL; AZ_MATRIX* Avbr = NULL; int err = create_and_transform_simple_matrix(AZ_MSR_MATRIX, localN, 4.0, proc_config, Amsr, external, update_index, external_index); int N_update = localN+Amsr->data_org[AZ_N_border]; double* x_msr = new double[N_update]; double* b_msr = new double[N_update*2]; double* b_msr_u = b_msr+N_update; double* x_vbr = new double[N_update]; double* b_vbr = new double[N_update*2]; double* b_vbr_u = b_vbr+N_update; err = create_and_transform_simple_matrix(AZ_VBR_MATRIX, localN, 4.0, proc_config, Avbr, external2, update_index2, external_index2); for(int i=0; i<N_update; ++i) { x_msr[i] = 1.0; b_msr[i] = 0.0; b_msr_u[i] = 0.0; x_vbr[i] = 1.0; b_vbr[i] = 0.0; b_vbr_u[i] = 0.0; } Amsr->matvec(x_msr, b_msr, Amsr, proc_config); Avbr->matvec(x_vbr, b_vbr, Avbr, proc_config); for(int i=0; i<N_update; ++i) { x_msr[i] = 0.0; x_vbr[i] = 0.0; } //check that the rhs's are the same. 
double max_rhs_diff1 = 0.0; double max_rhs_diff2 = 0.0; double* bptr_crs = b_crs.Values(); AZ_invorder_vec(b_msr, Amsr->data_org, update_index, NULL, b_msr_u); AZ_invorder_vec(b_vbr, Avbr->data_org, update_index2, Avbr->rpntr, b_vbr_u); for(int i=0; i<localN; ++i) { if (std::abs(bptr_crs[i] - b_msr_u[i]) > max_rhs_diff1) { max_rhs_diff1 = std::abs(bptr_crs[i] - b_msr_u[i]); } if (std::abs(bptr_crs[i] - b_vbr_u[i]) > max_rhs_diff2) { max_rhs_diff2 = std::abs(bptr_crs[i] - b_vbr_u[i]); } } if (max_rhs_diff1> 1.e-12) { cout << "AztecOO rhs not equal to Aztec msr rhs "<<max_rhs_diff1<<endl; return(-1); } if (max_rhs_diff2> 1.e-12) { cout << "AztecOO rhs not equal to Aztec vbr rhs "<<max_rhs_diff2<<endl; return(-1); } int* az_options = new int[AZ_OPTIONS_SIZE]; double* params = new double[AZ_PARAMS_SIZE]; double* status = new double[AZ_STATUS_SIZE]; AZ_defaults(az_options, params); az_options[AZ_solver] = AZ_cg; az_options[AZ_conv] = conv_option; az_options[AZ_scaling] = scaling_option; az_options[AZ_max_iter] = 100; params[AZ_tol] = 1.e-9; AZ_iterate(x_msr, b_msr, az_options, params, status, proc_config, Amsr, NULL, NULL); AZ_iterate(x_vbr, b_vbr, az_options, params, status, proc_config, Avbr, NULL, NULL); AZ_invorder_vec(x_msr, Amsr->data_org, update_index, NULL, b_msr_u); AZ_invorder_vec(x_vbr, Avbr->data_org, update_index2, Avbr->rpntr, b_vbr_u); double max_diff1 = 0.0; double max_diff2 = 0.0; double* xptr_crs = x_crs.Values(); for(int i=0; i<localN; ++i) { if (std::abs(xptr_crs[i] - b_msr_u[i]) > max_diff1) { max_diff1 = std::abs(xptr_crs[i] - b_msr_u[i]); } if (std::abs(xptr_crs[i] - b_vbr_u[i]) > max_diff2) { max_diff2 = std::abs(xptr_crs[i] - b_vbr_u[i]); } } if (max_diff1 > 1.e-7) { cout << "AztecOO failed to match Aztec msr with scaling and Anorm conv." << endl; return(-1); } if (max_diff2 > 1.e-7) { cout << "AztecOO failed to match Aztec vbr with scaling and Anorm conv." << endl; return(-1); } delete Acrs; delete [] x_msr; delete [] b_msr; delete [] x_vbr; delete [] b_vbr; destroy_matrix(Amsr); destroy_matrix(Avbr); delete [] proc_config; free(update_index); free(external); free(external_index); free(update_index2); free(external2); free(external_index2); delete [] az_options; delete [] params; delete [] status; return(0); }
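/* The tests above build their matrices in Aztec's MSR (and VBR) formats through a helper
   routine defined elsewhere.  As a reminder of what the MSR arrays look like, a 3x3
   tridiagonal matrix with diagonal 4 and off-diagonals -1 is stored (N = 3) as
       bindx = { 4, 5, 7, 8,   1,   0, 2,   1 }
       val   = { 4, 4, 4, *,  -1,  -1,-1,  -1 }     (val[N] is unused)
   where bindx[0..N] are row pointers into the off-diagonal section and val[0..N-1] holds
   the diagonal.  The helper below is purely illustrative and not part of the test code. */

#include <stdio.h>

/* Print row i of a matrix stored in Aztec MSR format: val[i] is the diagonal, and the
   off-diagonals of row i occupy positions bindx[i] .. bindx[i+1]-1 of bindx and val. */
void print_msr_row(int i, const int *bindx, const double *val)
{
  int k;
  printf("row %d: diag = %g, off-diagonals:", i, val[i]);
  for (k = bindx[i]; k < bindx[i+1]; k++)
    printf("  (col %d) %g", bindx[k], val[k]);
  printf("\n");
}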
int test_AZ_iterate_then_AZ_scale_f(Epetra_Comm& Comm, bool verbose) { (void)Comm; if (verbose) { cout << "testing AZ_iterate/AZ_scale_f with 'old' Aztec"<<endl; } int* proc_config = new int[AZ_PROC_SIZE]; #ifdef EPETRA_MPI AZ_set_proc_config(proc_config, MPI_COMM_WORLD); AZ_set_comm(proc_config, MPI_COMM_WORLD); #else AZ_set_proc_config(proc_config, 0); #endif int *external, *update_index, *external_index; int i, N = 5; AZ_MATRIX* Amat = NULL; int err = create_and_transform_simple_matrix(AZ_MSR_MATRIX, N, 3.0, proc_config, Amat, external, update_index, external_index); if (err != 0) { return(err); } int* options = new int[AZ_OPTIONS_SIZE]; double* params = new double[AZ_PARAMS_SIZE]; double* status = new double[AZ_STATUS_SIZE]; AZ_defaults(options, params); options[AZ_scaling] = AZ_sym_diag; if (verbose) { options[AZ_output] = AZ_warnings; } else { options[AZ_output] = 0; } int N_update = N+Amat->data_org[AZ_N_border]; double* x = new double[N_update]; double* b = new double[N_update]; for(i=0; i<N_update; ++i) { x[i] = 0.0; b[i] = 1.0; } AZ_PRECOND* Pmat = AZ_precond_create(Amat, AZ_precondition, NULL); AZ_SCALING* Scal = AZ_scaling_create(); options[AZ_keep_info] = 1; AZ_iterate(x, b, options, params, status, proc_config, Amat, Pmat, Scal); //now set options[AZ_pre_calc] = AZ_reuse and try to call AZ_scale_f. options[AZ_pre_calc] = AZ_reuse; AZ_scale_f(AZ_SCALE_MAT_RHS_SOL, Amat, options, b, x, proc_config, Scal); AZ_scaling_destroy(&Scal); AZ_precond_destroy(&Pmat); destroy_matrix(Amat); delete [] x; delete [] b; delete [] options; delete [] params; delete [] status; delete [] proc_config; free(update_index); free(external); free(external_index); return(0); }
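/* The test above keeps Aztec's internal scaling/preconditioning information with
   AZ_keep_info and then reuses it via AZ_pre_calc = AZ_reuse.  The sketch below isolates
   that pattern for two successive solves with different right-hand sides; the function
   name is illustrative, and the matrix, vectors and option arrays are assumed to be set
   up as in the surrounding examples. */

#include "az_aztec.h"

void solve_twice_with_reuse(AZ_MATRIX *Amat, AZ_PRECOND *Prec, struct AZ_SCALING *scaling,
                            double *x, double *b1, double *b2,
                            int *options, double *params, double *status,
                            int *proc_config)
{
  /* First solve: compute the preconditioner/scaling and keep it around. */
  options[AZ_keep_info] = 1;
  options[AZ_pre_calc]  = AZ_calc;
  AZ_iterate(x, b1, options, params, status, proc_config, Amat, Prec, scaling);

  /* Second solve: reuse the kept information instead of recomputing it. */
  options[AZ_pre_calc] = AZ_reuse;
  AZ_iterate(x, b2, options, params, status, proc_config, Amat, Prec, scaling);
}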
int main(int argc, char *argv[]) { int proc_config[AZ_PROC_SIZE];/* Processor information. */ int options[AZ_OPTIONS_SIZE]; /* Array used to select solver options. */ double params[AZ_PARAMS_SIZE]; /* User selected solver parameters. */ double status[AZ_STATUS_SIZE]; /* Information returned from AZ_solve(). */ int *bindx_real; /* index and values arrays for MSR matrices */ double *val_real, *val_imag; int * update; /* List of global eqs owned by the processor */ double *x_real, *b_real; /* initial guess/solution, RHS */ double *x_imag, *b_imag; unsigned int N_local; /* Number of equations on this node */ double residual; /* Used for computing residual */ double *xx_real, *xx_imag, *xx; /* Known exact solution */ int myPID, nprocs; AZ_MATRIX *Amat_real; /* Real matrix structure */ AZ_MATRIX *Amat; /* Komplex matrix to be solved. */ AZ_PRECOND *Prec; /* Komplex preconditioner */ double *x, *b; /* Komplex Initial guess and RHS */ int i; /******************************/ /* First executable statement */ /******************************/ #ifdef AZTEC_MPI MPI_Init(&argc,&argv); #endif /* Get number of processors and the name of this processor */ #ifdef AZTEC_MPI AZ_set_proc_config(proc_config,MPI_COMM_WORLD); #else AZ_set_proc_config(proc_config,0); #endif nprocs = proc_config[AZ_N_procs]; myPID = proc_config[AZ_node]; printf("proc %d of %d is alive\n",myPID, nprocs); /* Define two real diagonal matrices. Will use as real and imaginary parts */ /* Get the number of local equations from the command line */ if (argc!=2) { if (myPID==0) printf("Usage: %s number_of_local_equations\n",argv[0]); exit(1); } N_local = atoi(argv[1]); const unsigned int N_local_max = 1000000; if (N_local > N_local_max) { if (myPID==0) printf("No more than %d local equations allowed\n", N_local_max); exit(1); } /* Need N_local+1 elements for val/bindx arrays */ val_real = malloc((N_local+1)*sizeof(double)); val_imag = malloc((N_local+1)*sizeof(double)); /* bindx_imag is not needed since real/imag have same pattern */ bindx_real = malloc((N_local+1)*sizeof(int)); update = malloc((N_local+1)*sizeof(int)); /* Malloc equation update list */ b_real = malloc((N_local+1)*sizeof(double)); /* Malloc x and b arrays */ b_imag = malloc((N_local+1)*sizeof(double)); x_real = malloc((N_local+1)*sizeof(double)); x_imag = malloc((N_local+1)*sizeof(double)); xx_real = malloc((N_local+1)*sizeof(double)); xx_imag = malloc((N_local+1)*sizeof(double)); for (i=0; i<N_local; i++) { val_real[i] = 10 + i/(N_local/10); /* Some very fake diagonals */ val_imag[i] = 10 - i/(N_local/10); /* Should take exactly 20 GMRES steps */ x_real[i] = 0.0; /* Zero initial guess */ x_imag[i] = 0.0; xx_real[i] = 1.0; /* Let exact solution = 1 */ xx_imag[i] = 0.0; /* Generate RHS to match exact solution */ b_real[i] = val_real[i]*xx_real[i] - val_imag[i]*xx_imag[i]; b_imag[i] = val_imag[i]*xx_real[i] + val_real[i]*xx_imag[i]; /* All bindx[i] have same value since no off-diag terms */ bindx_real[i] = N_local + 1; /* each processor owns equations myPID*N_local through myPID*N_local + N_local - 1 */ update[i] = myPID*N_local + i; } bindx_real[N_local] = N_local+1; /* Need this last index */ /* Register the Aztec matrix for the real part; the imaginary part shares its sparsity pattern, so only its values (val_imag) are passed separately. */ Amat_real = AZ_matrix_create(N_local); AZ_set_MSR(Amat_real, bindx_real, val_real, NULL, N_local, update, AZ_GLOBAL); /* initialize AZTEC options */ AZ_defaults(options, params); options[AZ_solver] = AZ_gmres; /* Use GMRES with no preconditioning */ options[AZ_precond] = AZ_none; options[AZ_kspace] = 21;
options[AZ_max_iter] = 21; params[AZ_tol] = 1.e-14; /**************************************************************/ /* Construct linear system. Form depends on input parameters */ /**************************************************************/ /**************************************************************/ /* Method 1: Construct A, x, and b in one call. */ /* Useful if using A,x,b only one time. Equivalent to Method 2*/ /**************************************************************/ AZK_create_linsys_ri2k (x_real, x_imag, b_real, b_imag, options, params, proc_config, Amat_real, val_imag, &x, &b, &Amat); /**************************************************************/ /* Method 2: Construct A, x, and b in separate calls. */ /* Useful for having more control over the construction. */ /* Note that the matrix must be constructed first. */ /**************************************************************/ /* Uncomment these three calls and comment out the above call AZK_create_matrix_ri2k (options, params, proc_config, Amat_real, val_imag, &Amat); AZK_create_vector_ri2k(options, params, proc_config, Amat, x_real, x_imag, &x); AZK_create_vector_ri2k(options, params, proc_config, Amat, b_real, b_imag, &b); */ /**************************************************************/ /* Build exact solution vector. */ /* Check residual of init guess and exact solution */ /**************************************************************/ AZK_create_vector_ri2k(options, params, proc_config, Amat, xx_real, xx_imag, &xx); residual = AZK_residual_norm(x, b, options, params, proc_config, Amat); if (proc_config[AZ_node]==0) printf("\n\n\nNorm of residual using initial guess = %12.4g\n",residual); residual = AZK_residual_norm(xx, b, options, params, proc_config, Amat); AZK_destroy_vector(options, params, proc_config, Amat, &xx); if (proc_config[AZ_node]==0) printf("\n\n\nNorm of residual using exact solution = %12.4g\n",residual); /**************************************************************/ /* Create preconditioner */ /**************************************************************/ AZK_create_precon(options, params, proc_config, x, b, Amat, &Prec); /**************************************************************/ /* Solve linear system using Aztec. */ /**************************************************************/ AZ_iterate(x, b, options, params, status, proc_config, Amat, Prec, NULL); /**************************************************************/ /* Extract solution. */ /**************************************************************/ AZK_extract_solution_k2ri(options, params, proc_config, Amat, Prec, x, x_real, x_imag); /**************************************************************/ /* Destroy Preconditioner. */ /**************************************************************/ AZK_destroy_precon (options, params, proc_config, Amat, &Prec); /**************************************************************/ /* Destroy linear system. 
*/ /**************************************************************/ AZK_destroy_linsys (options, params, proc_config, &x, &b, &Amat); if (proc_config[AZ_node]==0) { printf("True residual norm = %22.16g\n",status[AZ_r]); printf("True scaled residual norm = %22.16g\n",status[AZ_scaled_r]); printf("Computed residual norm = %22.16g\n",status[AZ_rec_r]); } /* Print comparison between known exact and computed solution */ {double sum = 0.0; for (i=0; i<N_local; i++) sum += fabs(x_real[i]-xx_real[i]); for (i=0; i<N_local; i++) sum += fabs(x_imag[i]-xx_imag[i]); printf("Processor %d: Difference between exact and computed solution = %12.4g\n", proc_config[AZ_node],sum); } /* Free memory allocated */ free((void *) val_real ); free((void *) bindx_real ); free((void *) val_imag ); free((void *) update ); free((void *) b_real ); free((void *) b_imag ); free((void *) x_real ); free((void *) x_imag ); free((void *) xx_real ); free((void *) xx_imag ); AZ_matrix_destroy(&Amat_real); #ifdef AZTEC_MPI MPI_Finalize(); #endif return 0 ; }
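/* A tiny stand-alone check (not part of the Komplex example above) of the complex
   arithmetic it uses to build the right-hand side: for A = A_r + i*A_i and exact solution
   xx = xx_r + i*xx_i, the product b = A*xx has entrywise components
   b_r = A_r*xx_r - A_i*xx_i and b_i = A_i*xx_r + A_r*xx_i, which is exactly the formula
   applied to the diagonal entries in the loop that fills b_real and b_imag. */

#include <stdio.h>

int main(void)
{
  double a_r = 10.0, a_i = 10.0;   /* one diagonal entry of the real and imaginary parts */
  double xx_r = 1.0, xx_i = 0.0;   /* exact solution used in the example: 1 + 0i */

  double b_r = a_r*xx_r - a_i*xx_i;   /* = 10 */
  double b_i = a_i*xx_r + a_r*xx_i;   /* = 10 */

  printf("b = %g + %gi\n", b_r, b_i);
  return 0;
}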