HYPRE_Int HYPRE_ParCSRMLDestroy( HYPRE_Solver solver )
{
   HYPRE_Int i;
   MH_Matrix *Amat;
   MH_Link   *link = (MH_Link *) solver;

   if ( link->ml_ag != NULL ) ML_Aggregate_Destroy( &(link->ml_ag) );
   ML_Destroy( &(link->ml_ptr) );

   if ( link->contxt->partition != NULL ) free( link->contxt->partition );
   if ( link->contxt->Amat != NULL )
   {
      Amat = (MH_Matrix *) link->contxt->Amat;
      if ( Amat->sendProc != NULL ) free (Amat->sendProc);
      if ( Amat->sendLeng != NULL ) free (Amat->sendLeng);
      if ( Amat->sendList != NULL )
      {
         for (i = 0; i < Amat->sendProcCnt; i++ )
            if (Amat->sendList[i] != NULL) free (Amat->sendList[i]);
         free (Amat->sendList);
      }
      if ( Amat->recvProc != NULL ) free (Amat->recvProc);
      if ( Amat->recvLeng != NULL ) free (Amat->recvLeng);
      if ( Amat->map      != NULL ) free (Amat->map);
      free( Amat );
   }
   if ( link->contxt != NULL ) free( link->contxt );
   free( link );
   return 0;
}
PetscErrorCode PCReset_ML(PC pc)
{
  PetscErrorCode ierr;
  PC_MG          *mg    = (PC_MG*)pc->data;
  PC_ML          *pc_ml = (PC_ML*)mg->innerctx;
  PetscInt       level,fine_level=pc_ml->Nlevels-1;

  PetscFunctionBegin;
  ML_Aggregate_Destroy(&pc_ml->agg_object);
  ML_Destroy(&pc_ml->ml_object);

  if (pc_ml->PetscMLdata) {
    ierr = PetscFree(pc_ml->PetscMLdata->pwork);CHKERRQ(ierr);
    ierr = MatDestroy(&pc_ml->PetscMLdata->Aloc);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->x);CHKERRQ(ierr);
    ierr = VecDestroy(&pc_ml->PetscMLdata->y);CHKERRQ(ierr);
  }
  ierr = PetscFree(pc_ml->PetscMLdata);CHKERRQ(ierr);

  if (pc_ml->gridctx) {
    for (level=0; level<fine_level; level++) {
      if (pc_ml->gridctx[level].A)   {ierr = MatDestroy(&pc_ml->gridctx[level].A);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].P)   {ierr = MatDestroy(&pc_ml->gridctx[level].P);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].R)   {ierr = MatDestroy(&pc_ml->gridctx[level].R);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].x)   {ierr = VecDestroy(&pc_ml->gridctx[level].x);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level].b)   {ierr = VecDestroy(&pc_ml->gridctx[level].b);CHKERRQ(ierr);}
      if (pc_ml->gridctx[level+1].r) {ierr = VecDestroy(&pc_ml->gridctx[level+1].r);CHKERRQ(ierr);}
    }
  }
  ierr = PetscFree(pc_ml->gridctx);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
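/* Context for the PCReset_ML routine above: applications normally reach it
 * indirectly by selecting PETSc's "ml" preconditioner type rather than by
 * calling it directly.  A minimal sketch of that usage follows; it assumes a
 * recent PETSc built with ML support and an already-assembled Mat A and
 * Vecs b, x.  The function name solve_with_ml is hypothetical.              */
#include <petscksp.h>

PetscErrorCode solve_with_ml(Mat A, Vec b, Vec x)
{
  PetscErrorCode ierr;
  KSP            ksp;
  PC             pc;

  PetscFunctionBegin;
  ierr = KSPCreate(PETSC_COMM_WORLD,&ksp);CHKERRQ(ierr);
  ierr = KSPSetOperators(ksp,A,A);CHKERRQ(ierr);
  ierr = KSPGetPC(ksp,&pc);CHKERRQ(ierr);
  ierr = PCSetType(pc,PCML);CHKERRQ(ierr);      /* equivalent to -pc_type ml  */
  ierr = KSPSetFromOptions(ksp);CHKERRQ(ierr);  /* picks up -pc_ml_* options  */
  ierr = KSPSolve(ksp,b,x);CHKERRQ(ierr);
  ierr = KSPDestroy(&ksp);CHKERRQ(ierr);        /* tears down the ML hierarchy */
  PetscFunctionReturn(0);
}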
void sample1(struct data *Afine_data, struct data *Acoarse_data,
             struct data *Rmat_data,  struct data *Pmat_data,
             double *sol, double *rhs)
{
   ML     *my_ml;
   int    i;
   int    fine_grid, output_level = 10, N_grids = 2, grid0 = 0, grid1 = 1;
   int    Nfine, Ncoarse;
   double *diagonal;

   Nfine    = Rmat_data->from_size;
   Ncoarse  = Rmat_data->to_size;
   diagonal = (double *) malloc(Nfine*sizeof(double));
   for (i = 0; i < Nfine; i++) diagonal[i] = 2.;

   fine_grid = grid1;
   ML_Create         (&my_ml, N_grids);
   ML_Set_OutputLevel(my_ml, output_level);

   ML_Init_Amatrix       (my_ml, grid1, Nfine, Nfine, (void *) Afine_data);
   ML_Set_Amatrix_Getrow (my_ml, grid1, myAgetrow, my_comm, Nfine+1);
   ML_Set_Amatrix_Matvec (my_ml, grid1, mymatvec);
   ML_Set_Amatrix_Diag   (my_ml, grid1, Nfine, diagonal);
   ML_Gen_Smoother_Jacobi(my_ml, grid1, ML_PRESMOOTHER, 2, ML_DEFAULT);

   ML_Init_Prolongator      (my_ml, grid0, grid1, Ncoarse, Nfine, (void *)Pmat_data);
   ML_Set_Prolongator_Getrow(my_ml, grid0, myPgetrow, my_comm, Ncoarse+1);
   ML_Set_Prolongator_Matvec(my_ml, grid0, myinterp);

   ML_Init_Restrictor      (my_ml, grid1, grid0, Nfine, Ncoarse, (void *)Rmat_data);
   ML_Set_Restrictor_Getrow(my_ml, grid1, myRgetrow, my_comm, Nfine+1);
   ML_Set_Restrictor_Matvec(my_ml, grid1, myrestrict);

   ML_Gen_AmatrixRAP(my_ml, grid1, grid0);
#ifdef SUPERLU
   ML_Gen_CoarseSolverSuperLU(my_ml, grid0);
#else
   ML_Gen_Smoother_Jacobi(my_ml, grid0, ML_PRESMOOTHER, 100, ML_DEFAULT);
#endif
   /* ML_Gen_Smoother_Jacobi(my_ml, grid0, ML_PRESMOOTHER, 200, ML_DEFAULT); */
   /* ML_Gen_Smoother_GaussSeidel(my_ml, grid0, ML_PRESMOOTHER, 200, 1.);    */

   ML_Gen_Solver(my_ml, 0, fine_grid, grid0);
   ML_Iterate   (my_ml, sol, rhs);

   ML_Destroy(&my_ml);
   ML_free(diagonal);
}
void sample2(struct data *Afine_data, struct data *Acoarse_data,
             struct data *Rmat_data,  struct data *Pmat_data,
             double *sol, double *rhs)
{
   ML          *my_ml;
   struct data fsmooth, csmooth;
   int         fine_grid, output_level = 10, N_grids = 2, grid0 = 0, grid1 = 1;
   int         Nfine, Ncoarse;

   Nfine   = Rmat_data->from_size;
   Ncoarse = Rmat_data->to_size;

   fsmooth.size           = Nfine;
   fsmooth.ntimes         = 4;
   fsmooth.processor_info = Afine_data->processor_info;
   csmooth.size           = Ncoarse;
   csmooth.processor_info = Afine_data->processor_info;
   csmooth.ntimes         = 1000;

   fine_grid = grid1;
   ML_Create         (&my_ml, N_grids);
   ML_Set_OutputLevel(my_ml, output_level);

   ML_Init_Amatrix      (my_ml, grid1, Nfine, Nfine, (void *) Afine_data);
   ML_Set_Amatrix_Matvec(my_ml, grid1, mymatvec);
   ML_Init_Amatrix      (my_ml, grid0, Ncoarse, Ncoarse, (void *) Acoarse_data);
   ML_Set_Amatrix_Matvec(my_ml, grid0, mymatvec);

   ML_Init_Restrictor       (my_ml, grid1, grid0, Nfine, Ncoarse, (void *)Rmat_data);
   ML_Set_Restrictor_Matvec (my_ml, grid1, myrestrict);
   ML_Init_Prolongator      (my_ml, grid0, grid1, Ncoarse, Nfine, (void *)Pmat_data);
   ML_Set_Prolongator_Matvec(my_ml, grid0, myinterp);

   ML_Set_Smoother(my_ml, grid1, ML_PRESMOOTHER, (void *)&fsmooth, mysmooth, NULL);
   ML_Set_Smoother(my_ml, grid0, ML_PRESMOOTHER, (void *)&csmooth, mysmooth, NULL);

   ML_Gen_Solver(my_ml, 0, fine_grid, grid0);
   ML_Iterate   (my_ml, sol, rhs);
   ML_Destroy   (&my_ml);
}
void sample3(struct data *Afine_data, struct data *Acoarse_data,
             struct data *Rmat_data,  struct data *Pmat_data,
             double *sol, double *rhs)
{
   ML     *my_ml;
   double *diagonal;
   int    i, fine_grid, output_level = 10, N_grids = 2, grid0 = 1, grid1 = 0;
   int    Nfine, Ncoarse;

   Nfine    = Rmat_data->from_size;
   Ncoarse  = Rmat_data->to_size;
   diagonal = (double *) malloc(Nfine*sizeof(double));
   for (i = 0; i < Nfine; i++) diagonal[i] = 2.;

   fine_grid = grid1;
   ML_Create         (&my_ml, N_grids);
   ML_Set_OutputLevel(my_ml, output_level);

   ML_Init_Amatrix       (my_ml, grid1, Nfine, Nfine, (void *) Afine_data);
   ML_Set_Amatrix_Matvec (my_ml, grid1, mymatvec);
   ML_Set_Amatrix_Diag   (my_ml, grid1, Nfine, diagonal);
   ML_Gen_Smoother_Jacobi(my_ml, grid1, ML_PRESMOOTHER, 2, ML_DEFAULT);

   ML_Init_Amatrix       (my_ml, grid0, Ncoarse, Ncoarse, (void *) Acoarse_data);
   ML_Set_Amatrix_Matvec (my_ml, grid0, mymatvec);
   ML_Set_Amatrix_Diag   (my_ml, grid0, Ncoarse, diagonal);
   ML_Gen_Smoother_Jacobi(my_ml, grid0, ML_PRESMOOTHER, 200, ML_DEFAULT);

   ML_Init_Prolongator      (my_ml, grid0, grid1, Ncoarse, Nfine, (void *)Pmat_data);
   ML_Set_Prolongator_Matvec(my_ml, grid0, myinterp);
   ML_Init_Restrictor       (my_ml, grid1, grid0, Nfine, Ncoarse, (void *)Rmat_data);
   ML_Set_Restrictor_Matvec (my_ml, grid1, myrestrict);

   ML_Gen_Solver(my_ml, 0, fine_grid, grid0);
   ML_free(diagonal);
   ML_Iterate(my_ml, sol, rhs);
   ML_Destroy(&my_ml);
}
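/* The three samples above register user callbacks (mymatvec, myAgetrow,
 * myPgetrow, myRgetrow, my_comm, mysmooth, myinterp, myrestrict) that are not
 * shown here.  The sketch below illustrates the shape two of them typically
 * take with recent ML releases; the exact prototypes vary between versions
 * (older releases pass a 'void *data' instead of an ML_Operator *), and the
 * CSR fields rowptr/cols/vals assumed in 'struct data' are illustrative only,
 * not the layout the real samples use.                                       */
#include "ml_include.h"

int mymatvec(ML_Operator *Amat, int in_length, double p[],
             int out_length, double ap[])
{
   struct data *A = (struct data *) ML_Get_MyMatvecData(Amat);
   int i, k;

   /* in_length is ignored here: this sketch assumes a serial matrix with no
      ghost exchange.                                                         */
   for (i = 0; i < out_length; i++) {
      ap[i] = 0.0;
      for (k = A->rowptr[i]; k < A->rowptr[i+1]; k++)   /* assumed CSR fields */
         ap[i] += A->vals[k] * p[A->cols[k]];
   }
   return 0;
}

int myAgetrow(ML_Operator *Amat, int N_requested_rows, int requested_rows[],
              int allocated_space, int columns[], double values[],
              int row_lengths[])
{
   struct data *A = (struct data *) ML_Get_MyGetrowData(Amat);
   int i, k, row, nz = 0;

   for (i = 0; i < N_requested_rows; i++) {
      row            = requested_rows[i];
      row_lengths[i] = A->rowptr[row+1] - A->rowptr[row];   /* assumed CSR */
      if (nz + row_lengths[i] > allocated_space) return 0;  /* ask ML for more space */
      for (k = A->rowptr[row]; k < A->rowptr[row+1]; k++) {
         columns[nz] = A->cols[k];
         values[nz]  = A->vals[k];
         nz++;
      }
   }
   return 1;   /* success */
}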
int main(int argc, char *argv[])
{
   ML           *ml_object;
   int          i, N_grids = 3, N_levels;
   double       sol[5], rhs[5];
   ML_Aggregate *agg_object;
   int          proc, nlocal, nlocal_allcolumns;

   MPI_Init(&argc,&argv);
   ML_Set_PrintLevel(15);

   for (i = 0; i < 5; i++) sol[i] = 0.;
   for (i = 0; i < 5; i++) rhs[i] = 2.;

   ML_Create(&ml_object, N_grids);
   proc = ml_object->comm->ML_mypid;
   if (ml_object->comm->ML_nprocs != 2) {
      if (proc == 0) printf("Must be run on two processors\n");
      ML_Destroy(&ml_object);
      MPI_Finalize();
      exit(1);
   }

   if      (proc == 0) {nlocal = 2; nlocal_allcolumns = 4;}
   else if (proc == 1) {nlocal = 3; nlocal_allcolumns = 5;}
   else                {nlocal = 0; nlocal_allcolumns = 0;}

   ML_Init_Amatrix      (ml_object, 0, nlocal, nlocal, &proc);
   ML_Set_Amatrix_Getrow(ml_object, 0, Poisson_getrow, Poisson_comm,
                         nlocal_allcolumns);
   ML_Set_Amatrix_Matvec(ml_object, 0, Poisson_matvec);

   ML_Aggregate_Create(&agg_object);
   ML_Aggregate_Set_MaxCoarseSize(agg_object,1);
   N_levels = ML_Gen_MGHierarchy_UsingAggregation(ml_object, 0,
                                                  ML_INCREASING, agg_object);
   ML_Gen_Smoother_Jacobi(ml_object, ML_ALL_LEVELS, ML_PRESMOOTHER, 1,
                          ML_DEFAULT);
   ML_Gen_Solver(ml_object, ML_MGV, 0, N_levels-1);

   ML_Iterate(ml_object, sol, rhs);

   if (proc == 0) {
      printf("sol(0) = %e\n",sol[1]);
      fflush(stdout);
   }
   ML_Comm_GsumInt(ml_object->comm,1);   /* just used for synchronization */
   if (proc == 1) {
      printf("sol(1) = %e\n",sol[0]);
      printf("sol(2) = %e\n",sol[1]);
      printf("sol(3) = %e\n",sol[2]);
      fflush(stdout);
   }
   ML_Comm_GsumInt(ml_object->comm,1);   /* just used for synchronization */
   if (proc == 0) {
      printf("sol(4) = %e\n",sol[0]);
      fflush(stdout);
   }

   ML_Aggregate_Destroy(&agg_object);
   ML_Destroy(&ml_object);

   MPI_Finalize();
   return 0;
}
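/* Variation on the Poisson listing above: the Jacobi smoother can be swapped
 * for another built-in smoother without changing the rest of the setup.  The
 * calls below appear in later listings in this section; the sweep count and
 * damping value are illustrative only, and whether a given smoother accepts
 * ML_ALL_LEVELS should be checked against the ML version in use.             */
ML_Aggregate_Set_MaxCoarseSize(agg_object, 10);
N_levels = ML_Gen_MGHierarchy_UsingAggregation(ml_object, 0,
                                               ML_INCREASING, agg_object);
ML_Gen_Smoother_SymGaussSeidel(ml_object, ML_ALL_LEVELS, ML_BOTH, 1, 1.0);
ML_Gen_Solver(ml_object, ML_MGV, 0, N_levels-1);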
int main(int argc, char *argv[]) { int Nnodes=16*16; /* Total number of nodes in the problem.*/ /* 'Nnodes' must be a perfect square. */ int MaxMgLevels=6; /* Maximum number of Multigrid Levels */ int Nits_per_presmooth=1; /* # of pre & post smoothings per level */ double tolerance = 1.0e-8; /* At convergence: */ /* ||r_k||_2 < tolerance ||r_0||_2 */ int smoothPe_flag = ML_YES; /* ML_YES: smooth tentative prolongator */ /* ML_NO: don't smooth prolongator */ /***************************************************************************/ /* Select Hiptmair relaxation subsmoothers for the nodal and edge problems */ /* Choices include */ /* 1) ML_Gen_Smoother_SymGaussSeidel: this corresponds to a processor */ /* local version of symmetric Gauss-Seidel/SOR. The number of sweeps */ /* can be set via either 'edge_its' or 'nodal_its'. The damping can */ /* be set via 'edge_omega' or 'nodal_omega'. When set to ML_DDEFAULT, */ /* the damping is set to '1' on one processor. On multiple processors */ /* a lower damping value is set. This is needed to converge processor */ /* local SOR. */ /* 2) ML_Gen_Smoother_Cheby: this corresponds to polynomial relaxation. */ /* The degree of the polynomial is set via 'edge_its' or 'nodal_its'. */ /* If the degree is '-1', Marian Brezina's MLS polynomial is chosen. */ /* Otherwise, a Chebyshev polynomial is used over high frequencies */ /* [ lambda_max/alpha , lambda_max]. Lambda_max is computed. 'alpha' */ /* is hardwired in this example to correspond to twice the ratio of */ /* unknowns in the fine and coarse meshes. */ /* */ /* Using 'hiptmair_type' (see comments below) it is also possible to choose*/ /* when edge and nodal problems are relaxed within the Hiptmair smoother. */ /***************************************************************************/ void *edge_smoother=(void *) /* Edge relaxation: */ ML_Gen_Smoother_Cheby; /* ML_Gen_Smoother_Cheby */ /* ML_Gen_Smoother_SymGaussSeidel */ void *nodal_smoother=(void *) /* Nodal relaxation */ ML_Gen_Smoother_Cheby;/* ML_Gen_Smoother_Cheby */ /* ML_Gen_Smoother_SymGaussSeidel */ int edge_its = 3; /* Iterations or polynomial degree for */ int nodal_its = 3; /* edge/nodal subsmoothers. */ double nodal_omega = ML_DDEFAULT, /* SOR damping parameter for noda/edge */ edge_omega = ML_DDEFAULT; /* subsmoothers (see comments above). */ int hiptmair_type=HALF_HIPTMAIR;/* FULL_HIPTMAIR: each invokation */ /* smoothes on edges, then nodes, */ /* and then once again on edges. */ /* HALF_HIPTMAIR: each pre-invokation */ /* smoothes on edges, then nodes. */ /* Each post-invokation smoothes */ /* on nodes then edges. . */ ML_Operator *Tmat, *Tmat_trans, **Tmat_array, **Tmat_trans_array; ML *ml_edges, *ml_nodes; ML_Aggregate *ag; int Nfine_edge, Ncoarse_edge, Nfine_node, Ncoarse_node, Nlevels; int level, coarsest_level, itmp; double edge_coarsening_rate, node_coarsening_rate, *rhs, *xxx; void **edge_args, **nodal_args; struct user_partition Edge_Partition = {NULL, NULL,0,0}, Node_Partition = {NULL, NULL,0,0}; struct Tmat_data Tmat_data; int i, Ntotal; ML_Comm *comm; /* See Aztec User's Guide for information on these variables */ #ifdef AZTEC AZ_MATRIX *Ke_mat, *Kn_mat; AZ_PRECOND *Pmat = NULL; int proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE]; double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE]; #endif /* get processor information (proc id & # of procs) and set ML's printlevel. 
*/ #ifdef ML_MPI MPI_Init(&argc,&argv); #endif #ifdef AZTEC AZ_set_proc_config(proc_config, COMMUNICATOR); #endif ML_Set_PrintLevel(10); /* set ML's output level: 0 gives least output */ /* Set the # of global nodes/edges and partition both the edges and the */ /* nodes over the processors. NOTE: I believe we assume that if an edge */ /* is assigned to a processor at least one of its nodes must be also */ /* assigned to that processor. */ Node_Partition.Nglobal = Nnodes; Edge_Partition.Nglobal = Node_Partition.Nglobal*2; Node_Partition.type = NODE; Edge_Partition.type = EDGE; #define perxodic #ifdef periodic Node_Partition.Nglobal += 2; #endif partition_edges(&Edge_Partition); partition_nodes(&Node_Partition); xxx = (double *) ML_allocate((Edge_Partition.Nlocal+100)*sizeof(double)); rhs = (double *) ML_allocate((Edge_Partition.Nlocal+100)*sizeof(double)); for (i = 0; i < Edge_Partition.Nlocal + 100; i++) xxx[i] = -1.; for (i = 0; i < Edge_Partition.Nlocal; i++) xxx[i] = (double) Edge_Partition.my_global_ids[i]; update_ghost_edges(xxx, (void *) &Edge_Partition); /* Create an empty multigrid hierarchy and set the 'MaxMGLevels-1'th */ /* level discretization within this hierarchy to the ML matrix */ /* representing Ke (Maxwell edge discretization). */ ML_Create(&ml_edges, MaxMgLevels); #ifdef AZTEC /* Build Ke as an Aztec matrix. Use built-in function AZ_ML_Set_Amat() */ /* to convert to an ML matrix and put in hierarchy. */ Ke_mat = user_Ke_build(&Edge_Partition); AZ_ML_Set_Amat(ml_edges, MaxMgLevels-1, Edge_Partition.Nlocal, Edge_Partition.Nlocal, Ke_mat, proc_config); #else /* Build Ke directly as an ML matrix. */ ML_Init_Amatrix (ml_edges, MaxMgLevels-1, Edge_Partition.Nlocal, Edge_Partition.Nlocal, &Edge_Partition); Ntotal = Edge_Partition.Nlocal; if (Edge_Partition.nprocs == 2) Ntotal += Edge_Partition.Nghost; ML_Set_Amatrix_Getrow(ml_edges, MaxMgLevels-1, Ke_getrow, update_ghost_edges, Ntotal); ML_Set_Amatrix_Matvec(ml_edges, MaxMgLevels-1, Ke_matvec); #endif /* Build an Aztec matrix representing an auxiliary nodal PDE problem. */ /* This should be a variable coefficient Poisson problem (with unknowns*/ /* at the nodes). The coefficients should be chosen to reflect the */ /* conductivity of the original edge problems. */ /* Create an empty multigrid hierarchy. Convert the Aztec matrix to an */ /* ML matrix and put it in the 'MaxMGLevels-1' level of the hierarchy. */ /* Note it is possible to multiply T'*T for get this matrix though this*/ /* will not incorporate material properties. */ ML_Create(&ml_nodes, MaxMgLevels); #ifdef AZTEC Kn_mat = user_Kn_build( &Node_Partition); AZ_ML_Set_Amat(ml_nodes, MaxMgLevels-1, Node_Partition.Nlocal, Node_Partition.Nlocal, Kn_mat, proc_config); #else ML_Init_Amatrix (ml_nodes, MaxMgLevels-1 , Node_Partition.Nlocal, Node_Partition.Nlocal, &Node_Partition); Ntotal = Node_Partition.Nlocal; if (Node_Partition.nprocs == 2) Ntotal += Node_Partition.Nghost; ML_Set_Amatrix_Getrow(ml_nodes, MaxMgLevels-1, Kn_getrow, update_ghost_nodes, Ntotal); #endif /* Build an ML matrix representing the null space of the PDE problem. */ /* This should be a discrete gradient (nodes to edges). 
*/ #ifdef AZTEC Tmat = user_T_build (&Edge_Partition, &Node_Partition, &(ml_nodes->Amat[MaxMgLevels-1])); #else Tmat = ML_Operator_Create(ml_nodes->comm); Tmat_data.edge = &Edge_Partition; Tmat_data.node = &Node_Partition; Tmat_data.Kn = &(ml_nodes->Amat[MaxMgLevels-1]); ML_Operator_Set_ApplyFuncData( Tmat, Node_Partition.Nlocal, Edge_Partition.Nlocal, ML_EMPTY, (void *) &Tmat_data, Edge_Partition.Nlocal, NULL, 0); ML_Operator_Set_Getrow( Tmat, ML_INTERNAL, Edge_Partition.Nlocal,Tmat_getrow); ML_Operator_Set_ApplyFunc(Tmat, ML_INTERNAL, Tmat_matvec); ML_Comm_Create( &comm); ML_CommInfoOP_Generate( &(Tmat->getrow->pre_comm), update_ghost_nodes, &Node_Partition,comm, Tmat->invec_leng, Node_Partition.Nghost); #endif /********************************************************************/ /* Set some ML parameters. */ /*------------------------------------------------------------------*/ ML_Set_ResidualOutputFrequency(ml_edges, 1); ML_Set_Tolerance(ml_edges, 1.0e-8); ML_Aggregate_Create( &ag ); ML_Aggregate_Set_CoarsenScheme_Uncoupled(ag); ML_Aggregate_Set_DampingFactor(ag, 0.0); /* must use 0 for maxwell */ ML_Aggregate_Set_MaxCoarseSize(ag, 30); ML_Aggregate_Set_Threshold(ag, 0.0); /********************************************************************/ /* Set up Tmat_trans */ /*------------------------------------------------------------------*/ Tmat_trans = ML_Operator_Create(ml_edges->comm); ML_Operator_Transpose_byrow(Tmat, Tmat_trans); Nlevels=ML_Gen_MGHierarchy_UsingReitzinger(ml_edges, &ml_nodes,MaxMgLevels-1, ML_DECREASING,ag,Tmat,Tmat_trans, &Tmat_array,&Tmat_trans_array, smoothPe_flag, 1.5); /* Set the Hiptmair subsmoothers */ if (nodal_smoother == (void *) ML_Gen_Smoother_SymGaussSeidel) { nodal_args = ML_Smoother_Arglist_Create(2); ML_Smoother_Arglist_Set(nodal_args, 0, &nodal_its); ML_Smoother_Arglist_Set(nodal_args, 1, &nodal_omega); } if (edge_smoother == (void *) ML_Gen_Smoother_SymGaussSeidel) { edge_args = ML_Smoother_Arglist_Create(2); ML_Smoother_Arglist_Set(edge_args, 0, &edge_its); ML_Smoother_Arglist_Set(edge_args, 1, &edge_omega); } if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) { nodal_args = ML_Smoother_Arglist_Create(2); ML_Smoother_Arglist_Set(nodal_args, 0, &nodal_its); Nfine_node = Tmat_array[MaxMgLevels-1]->invec_leng; Nfine_node = ML_gsum_int(Nfine_node, ml_edges->comm); } if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) { edge_args = ML_Smoother_Arglist_Create(2); ML_Smoother_Arglist_Set(edge_args, 0, &edge_its); Nfine_edge = Tmat_array[MaxMgLevels-1]->outvec_leng; Nfine_edge = ML_gsum_int(Nfine_edge, ml_edges->comm); } /**************************************************** * Set up smoothers for all levels but the coarsest. 
* ****************************************************/ coarsest_level = MaxMgLevels - Nlevels; for (level = MaxMgLevels-1; level > coarsest_level; level--) { if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) { Ncoarse_edge = Tmat_array[level-1]->outvec_leng; Ncoarse_edge = ML_gsum_int(Ncoarse_edge, ml_edges->comm); edge_coarsening_rate = 2.*((double) Nfine_edge)/ ((double) Ncoarse_edge); ML_Smoother_Arglist_Set(edge_args, 1, &edge_coarsening_rate); Nfine_edge = Ncoarse_edge; } if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) { Ncoarse_node = Tmat_array[level-1]->invec_leng; Ncoarse_node = ML_gsum_int(Ncoarse_node, ml_edges->comm); node_coarsening_rate = 2.*((double) Nfine_node)/ ((double) Ncoarse_node); ML_Smoother_Arglist_Set(nodal_args, 1, &node_coarsening_rate); Nfine_node = Ncoarse_node; } ML_Gen_Smoother_Hiptmair(ml_edges, level, ML_BOTH, Nits_per_presmooth, Tmat_array, Tmat_trans_array, NULL, edge_smoother, edge_args, nodal_smoother,nodal_args, hiptmair_type); } /******************************************* * Set up coarsest level smoother *******************************************/ if (edge_smoother == (void *) ML_Gen_Smoother_Cheby) { edge_coarsening_rate = (double) Nfine_edge; ML_Smoother_Arglist_Set(edge_args, 1, &edge_coarsening_rate); } if (nodal_smoother == (void *) ML_Gen_Smoother_Cheby) { node_coarsening_rate = (double) Nfine_node; ML_Smoother_Arglist_Set(nodal_args,1,&node_coarsening_rate); } ML_Gen_CoarseSolverSuperLU( ml_edges, coarsest_level); /* Must be called before invoking the preconditioner */ ML_Gen_Solver(ml_edges, ML_MGV, MaxMgLevels-1, coarsest_level); /* Set the initial guess and the right hand side. Invoke solver */ xxx = (double *) ML_allocate(Edge_Partition.Nlocal*sizeof(double)); ML_random_vec(xxx, Edge_Partition.Nlocal, ml_edges->comm); rhs = (double *) ML_allocate(Edge_Partition.Nlocal*sizeof(double)); ML_random_vec(rhs, Edge_Partition.Nlocal, ml_edges->comm); #ifdef AZTEC /* Choose the Aztec solver and criteria. Also tell Aztec that */ /* ML will be supplying the preconditioner. */ AZ_defaults(options, params); options[AZ_solver] = AZ_fixed_pt; options[AZ_solver] = AZ_gmres; options[AZ_kspace] = 80; params[AZ_tol] = tolerance; AZ_set_ML_preconditioner(&Pmat, Ke_mat, ml_edges, options); options[AZ_conv] = AZ_noscaled; AZ_iterate(xxx, rhs, options, params, status, proc_config, Ke_mat, Pmat, NULL); #else ML_Iterate(ml_edges, xxx, rhs); #endif /* clean up. */ ML_Smoother_Arglist_Delete(&nodal_args); ML_Smoother_Arglist_Delete(&edge_args); ML_Aggregate_Destroy(&ag); ML_Destroy(&ml_edges); ML_Destroy(&ml_nodes); #ifdef AZTEC AZ_free((void *) Ke_mat->data_org); AZ_free((void *) Ke_mat->val); AZ_free((void *) Ke_mat->bindx); if (Ke_mat != NULL) AZ_matrix_destroy(&Ke_mat); if (Pmat != NULL) AZ_precond_destroy(&Pmat); if (Kn_mat != NULL) AZ_matrix_destroy(&Kn_mat); #endif free(xxx); free(rhs); ML_Operator_Destroy(&Tmat); ML_Operator_Destroy(&Tmat_trans); ML_MGHierarchy_ReitzingerDestroy(MaxMgLevels-2, &Tmat_array, &Tmat_trans_array); #ifdef ML_MPI MPI_Finalize(); #endif return 0; }
int main(int argc, char *argv[]) { int num_PDE_eqns=1, N_levels=3, nsmooth=2; int leng, level, N_grid_pts, coarsest_level; int leng1,leng2; /* See Aztec User's Guide for more information on the */ /* variables that follow. */ int proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE]; double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE]; /* data structure for matrix corresponding to the fine grid */ double *val = NULL, *xxx, *rhs, solve_time, setup_time, start_time; AZ_MATRIX *Amat; AZ_PRECOND *Pmat = NULL; ML *ml; FILE *fp; int i, j, Nrigid, *garbage, nblocks=0, *blocks = NULL, *block_pde=NULL; struct AZ_SCALING *scaling; ML_Aggregate *ag; double *mode, *rigid=NULL, alpha; char filename[80]; int one = 1; int proc,nprocs; char pathfilename[100]; #ifdef ML_MPI MPI_Init(&argc,&argv); /* get number of processors and the name of this processor */ AZ_set_proc_config(proc_config, MPI_COMM_WORLD); proc = proc_config[AZ_node]; nprocs = proc_config[AZ_N_procs]; #else AZ_set_proc_config(proc_config, AZ_NOT_MPI); proc = 0; nprocs = 1; #endif if (proc_config[AZ_node] == 0) { sprintf(pathfilename,"%s/inputfile",argv[1]); ML_Reader_ReadInput(pathfilename, &context); } else context = (struct reader_context *) ML_allocate(sizeof(struct reader_context)); AZ_broadcast((char *) context, sizeof(struct reader_context), proc_config, AZ_PACK); AZ_broadcast((char *) NULL , 0 , proc_config, AZ_SEND); N_levels = context->N_levels; printf("N_levels %d\n",N_levels); nsmooth = context->nsmooth; num_PDE_eqns = context->N_dofPerNode; printf("num_PDE_eqns %d\n",num_PDE_eqns); ML_Set_PrintLevel(context->output_level); /* read in the number of matrix equations */ leng = 0; if (proc_config[AZ_node] == 0) { sprintf(pathfilename,"%s/data_matrix.txt",argv[1]); fp=fopen(pathfilename,"r"); if (fp==NULL) { printf("**ERR** couldn't open file data_matrix.txt\n"); exit(1); } fscanf(fp,"%d",&leng); fclose(fp); } leng = AZ_gsum_int(leng, proc_config); N_grid_pts=leng/num_PDE_eqns; /* initialize the list of global indices. NOTE: the list of global */ /* indices must be in ascending order so that subsequent calls to */ /* AZ_find_index() will function properly. */ #if 0 if (proc_config[AZ_N_procs] == 1) i = AZ_linear; else i = AZ_file; #endif i = AZ_linear; /* cannot use AZ_input_update for variable blocks (forgot why, but debugged through it)*/ /* make a linear distribution of the matrix */ /* if the linear distribution does not align with the blocks, */ /* this is corrected in ML_AZ_Reader_ReadVariableBlocks */ leng1 = leng/nprocs; leng2 = leng-leng1*nprocs; if (proc >= leng2) { leng2 += (proc*leng1); } else { leng1++; leng2 = proc*leng1; } N_update = leng1; update = (int*)AZ_allocate((N_update+1)*sizeof(int)); if (update==NULL) { (void) fprintf (stderr, "Not enough space to allocate 'update'\n"); fflush(stderr); exit(EXIT_FAILURE); } for (i=0; i<N_update; i++) update[i] = i+leng2; #if 0 /* debug */ printf("proc %d N_update %d\n",proc_config[AZ_node],N_update); fflush(stdout); #endif sprintf(pathfilename,"%s/data_vblocks.txt",argv[1]); ML_AZ_Reader_ReadVariableBlocks(pathfilename,&nblocks,&blocks,&block_pde, &N_update,&update,proc_config); #if 0 /* debug */ printf("proc %d N_update %d\n",proc_config[AZ_node],N_update); fflush(stdout); #endif sprintf(pathfilename,"%s/data_matrix.txt",argv[1]); AZ_input_msr_matrix(pathfilename,update, &val, &bindx, N_update, proc_config); /* This code is to fix things up so that we are sure we have */ /* all blocks (including the ghost nodes) the same size. 
*/ /* not sure, whether this is a good idea with variable blocks */ /* the examples inpufiles (see top of this file) don't need it */ /* anyway */ /* AZ_block_MSR(&bindx, &val, N_update, num_PDE_eqns, update); */ AZ_transform_norowreordering(proc_config, &external, bindx, val, update, &update_index, &extern_index, &data_org, N_update, 0, 0, 0, &cpntr, AZ_MSR_MATRIX); Amat = AZ_matrix_create( leng ); AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL); Amat->matrix_type = data_org[AZ_matrix_type]; data_org[AZ_N_rows] = data_org[AZ_N_internal] + data_org[AZ_N_border]; start_time = AZ_second(); options[AZ_scaling] = AZ_none; ML_Create(&ml, N_levels); /* set up discretization matrix and matrix vector function */ AZ_ML_Set_Amat(ml, 0, N_update, N_update, Amat, proc_config); ML_Set_ResidualOutputFrequency(ml, context->output); ML_Set_Tolerance(ml, context->tol); ML_Aggregate_Create( &ag ); if (ML_strcmp(context->agg_coarsen_scheme,"Mis") == 0) { ML_Aggregate_Set_CoarsenScheme_MIS(ag); } else if (ML_strcmp(context->agg_coarsen_scheme,"Uncoupled") == 0) { ML_Aggregate_Set_CoarsenScheme_Uncoupled(ag); } else if (ML_strcmp(context->agg_coarsen_scheme,"Coupled") == 0) { ML_Aggregate_Set_CoarsenScheme_Coupled(ag); } else if (ML_strcmp(context->agg_coarsen_scheme,"Metis") == 0) { ML_Aggregate_Set_CoarsenScheme_METIS(ag); for (i=0; i<N_levels; i++) ML_Aggregate_Set_NodesPerAggr(ml,ag,i,9); } else if (ML_strcmp(context->agg_coarsen_scheme,"VBMetis") == 0) { /* when no blocks read, use standard metis assuming constant block sizes */ if (!blocks) ML_Aggregate_Set_CoarsenScheme_METIS(ag); else { ML_Aggregate_Set_CoarsenScheme_VBMETIS(ag); ML_Aggregate_Set_Vblocks_CoarsenScheme_VBMETIS(ag,0,N_levels,nblocks, blocks,block_pde,N_update); } for (i=0; i<N_levels; i++) ML_Aggregate_Set_NodesPerAggr(ml,ag,i,9); } else { printf("**ERR** ML: Unknown aggregation scheme %s\n",context->agg_coarsen_scheme); exit(-1); } ML_Aggregate_Set_DampingFactor(ag, context->agg_damping); ML_Aggregate_Set_MaxCoarseSize( ag, context->maxcoarsesize); ML_Aggregate_Set_Threshold(ag, context->agg_thresh); if (ML_strcmp(context->agg_spectral_norm,"Calc") == 0) { ML_Set_SpectralNormScheme_Calc(ml); } else if (ML_strcmp(context->agg_spectral_norm,"Anorm") == 0) { ML_Set_SpectralNormScheme_Anorm(ml); } else { printf("**WRN** ML: Unknown spectral norm scheme %s\n",context->agg_spectral_norm); } /* read in the rigid body modes */ Nrigid = 0; if (proc_config[AZ_node] == 0) { sprintf(filename,"data_nullsp%d.txt",Nrigid); sprintf(pathfilename,"%s/%s",argv[1],filename); while( (fp = fopen(pathfilename,"r")) != NULL) { fclose(fp); Nrigid++; sprintf(filename,"data_nullsp%d.txt",Nrigid); sprintf(pathfilename,"%s/%s",argv[1],filename); } } Nrigid = AZ_gsum_int(Nrigid,proc_config); if (Nrigid != 0) { rigid = (double *) ML_allocate( sizeof(double)*Nrigid*(N_update+1) ); if (rigid == NULL) { printf("Error: Not enough space for rigid body modes\n"); } } /* Set rhs */ sprintf(pathfilename,"%s/data_rhs.txt",argv[1]); fp = fopen(pathfilename,"r"); if (fp == NULL) { rhs=(double *)ML_allocate(leng*sizeof(double)); if (proc_config[AZ_node] == 0) printf("taking linear vector for rhs\n"); for (i = 0; i < N_update; i++) rhs[i] = (double) update[i]; } else { fclose(fp); if (proc_config[AZ_node] == 0) printf("reading rhs from a file\n"); AZ_input_msr_matrix(pathfilename, update, &rhs, &garbage, N_update, proc_config); } AZ_reorder_vec(rhs, data_org, update_index, NULL); for (i = 0; i < Nrigid; i++) { sprintf(filename,"data_nullsp%d.txt",i); 
sprintf(pathfilename,"%s/%s",argv[1],filename); AZ_input_msr_matrix(pathfilename, update, &mode, &garbage, N_update, proc_config); AZ_reorder_vec(mode, data_org, update_index, NULL); #if 0 /* test the given rigid body mode, output-vector should be ~0 */ Amat->matvec(mode, rigid, Amat, proc_config); for (j = 0; j < N_update; j++) printf("this is %d %e\n",j,rigid[j]); #endif for (j = 0; j < i; j++) { alpha = -AZ_gdot(N_update, mode, &(rigid[j*N_update]), proc_config)/ AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]), proc_config); DAXPY_F77(&N_update, &alpha, &(rigid[j*N_update]), &one, mode, &one); } /* rhs orthogonalization */ alpha = -AZ_gdot(N_update, mode, rhs, proc_config)/ AZ_gdot(N_update, mode, mode, proc_config); DAXPY_F77(&N_update, &alpha, mode, &one, rhs, &one); for (j = 0; j < N_update; j++) rigid[i*N_update+j] = mode[j]; free(mode); free(garbage); } for (j = 0; j < Nrigid; j++) { alpha = -AZ_gdot(N_update, rhs, &(rigid[j*N_update]), proc_config)/ AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]), proc_config); DAXPY_F77(&N_update, &alpha, &(rigid[j*N_update]), &one, rhs, &one); } #if 0 /* for testing the default nullsp */ ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, 6, NULL, N_update); #else if (Nrigid != 0) { ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, Nrigid, rigid, N_update); } #endif if (rigid) ML_free(rigid); ag->keep_agg_information = 1; coarsest_level = ML_Gen_MGHierarchy_UsingAggregation(ml, 0, ML_INCREASING, ag); coarsest_level--; if ( proc_config[AZ_node] == 0 ) printf("Coarse level = %d \n", coarsest_level); #if 0 /* set up smoothers */ if (!blocks) blocks = (int *) ML_allocate(sizeof(int)*N_update); #endif for (level = 0; level < coarsest_level; level++) { num_PDE_eqns = ml->Amat[level].num_PDEs; /* Sparse approximate inverse smoother that acutally does both */ /* pre and post smoothing. */ if (ML_strcmp(context->smoother,"Parasails") == 0) { ML_Gen_Smoother_ParaSails(ml , level, ML_PRESMOOTHER, nsmooth, parasails_sym, parasails_thresh, parasails_nlevels, parasails_filter, (int) parasails_loadbal, parasails_factorized); } /* This is the symmetric Gauss-Seidel smoothing that we usually use. */ /* In parallel, it is not a true Gauss-Seidel in that each processor */ /* does a Gauss-Seidel on its local submatrix independent of the */ /* other processors. */ else if (ML_strcmp(context->smoother,"GaussSeidel") == 0) { ML_Gen_Smoother_GaussSeidel(ml , level, ML_BOTH, nsmooth,1.); } else if (ML_strcmp(context->smoother,"SymGaussSeidel") == 0) { ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_BOTH, nsmooth,1.); } else if (ML_strcmp(context->smoother,"Poly") == 0) { ML_Gen_Smoother_Cheby(ml, level, ML_BOTH, 30., nsmooth); } else if (ML_strcmp(context->smoother,"BlockGaussSeidel") == 0) { ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_BOTH, nsmooth,1., num_PDE_eqns); } else if (ML_strcmp(context->smoother,"VBSymGaussSeidel") == 0) { if (blocks) ML_free(blocks); if (block_pde) ML_free(block_pde); blocks = NULL; block_pde = NULL; nblocks = 0; ML_Aggregate_Get_Vblocks_CoarsenScheme_VBMETIS(ag,level,N_levels,&nblocks, &blocks,&block_pde); if (blocks==NULL) ML_Gen_Blocks_Aggregates(ag, level, &nblocks, &blocks); ML_Gen_Smoother_VBlockSymGaussSeidel(ml , level, ML_BOTH, nsmooth,1., nblocks, blocks); } /* This is a true Gauss Seidel in parallel. This seems to work for */ /* elasticity problems. However, I don't believe that this is very */ /* efficient in parallel. 
*/ /* nblocks = ml->Amat[level].invec_leng; for (i =0; i < nblocks; i++) blocks[i] = i; ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml , level, ML_PRESMOOTHER, nsmooth, 1., nblocks, blocks); ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml, level, ML_POSTSMOOTHER, nsmooth, 1., nblocks, blocks); */ /* Jacobi Smoothing */ else if (ML_strcmp(context->smoother,"Jacobi") == 0) { ML_Gen_Smoother_Jacobi(ml , level, ML_PRESMOOTHER, nsmooth,.4); ML_Gen_Smoother_Jacobi(ml , level, ML_POSTSMOOTHER, nsmooth,.4); } /* This does a block Gauss-Seidel (not true GS in parallel) */ /* where each processor has 'nblocks' blocks. */ /* */ else if (ML_strcmp(context->smoother,"Metis") == 0) { if (blocks) ML_free(blocks); if (block_pde) ML_free(block_pde); nblocks = 250; ML_Gen_Blocks_Metis(ml, level, &nblocks, &blocks); ML_Gen_Smoother_VBlockSymGaussSeidel(ml , level, ML_BOTH, nsmooth,1., nblocks, blocks); } else { printf("unknown smoother %s\n",context->smoother); exit(1); } } /* set coarse level solver */ nsmooth = context->coarse_its; /* Sparse approximate inverse smoother that acutally does both */ /* pre and post smoothing. */ if (ML_strcmp(context->coarse_solve,"Parasails") == 0) { ML_Gen_Smoother_ParaSails(ml , coarsest_level, ML_PRESMOOTHER, nsmooth, parasails_sym, parasails_thresh, parasails_nlevels, parasails_filter, (int) parasails_loadbal, parasails_factorized); } else if (ML_strcmp(context->coarse_solve,"GaussSeidel") == 0) { ML_Gen_Smoother_GaussSeidel(ml , coarsest_level, ML_BOTH, nsmooth,1.); } else if (ML_strcmp(context->coarse_solve,"Poly") == 0) { ML_Gen_Smoother_Cheby(ml, coarsest_level, ML_BOTH, 30., nsmooth); } else if (ML_strcmp(context->coarse_solve,"SymGaussSeidel") == 0) { ML_Gen_Smoother_SymGaussSeidel(ml , coarsest_level, ML_BOTH, nsmooth,1.); } else if (ML_strcmp(context->coarse_solve,"BlockGaussSeidel") == 0) { ML_Gen_Smoother_BlockGaussSeidel(ml, coarsest_level, ML_BOTH, nsmooth,1., num_PDE_eqns); } else if (ML_strcmp(context->coarse_solve,"Aggregate") == 0) { if (blocks) ML_free(blocks); if (block_pde) ML_free(block_pde); ML_Gen_Blocks_Aggregates(ag, coarsest_level, &nblocks, &blocks); ML_Gen_Smoother_VBlockSymGaussSeidel(ml , coarsest_level, ML_BOTH, nsmooth,1., nblocks, blocks); } else if (ML_strcmp(context->coarse_solve,"Jacobi") == 0) { ML_Gen_Smoother_Jacobi(ml , coarsest_level, ML_BOTH, nsmooth,.5); } else if (ML_strcmp(context->coarse_solve,"Metis") == 0) { if (blocks) ML_free(blocks); if (block_pde) ML_free(block_pde); nblocks = 250; ML_Gen_Blocks_Metis(ml, coarsest_level, &nblocks, &blocks); ML_Gen_Smoother_VBlockSymGaussSeidel(ml , coarsest_level, ML_BOTH, nsmooth,1., nblocks, blocks); } else if (ML_strcmp(context->coarse_solve,"SuperLU") == 0) { ML_Gen_CoarseSolverSuperLU( ml, coarsest_level); } else if (ML_strcmp(context->coarse_solve,"Amesos") == 0) { ML_Gen_Smoother_Amesos(ml,coarsest_level,ML_AMESOS_KLU,-1, 0.0); } else { printf("unknown coarse grid solver %s\n",context->coarse_solve); exit(1); } ML_Gen_Solver(ml, ML_MGV, 0, coarsest_level); AZ_defaults(options, params); if (ML_strcmp(context->krylov,"Cg") == 0) { options[AZ_solver] = AZ_cg; } else if (ML_strcmp(context->krylov,"Bicgstab") == 0) { options[AZ_solver] = AZ_bicgstab; } else if (ML_strcmp(context->krylov,"Tfqmr") == 0) { options[AZ_solver] = AZ_tfqmr; } else if (ML_strcmp(context->krylov,"Gmres") == 0) { options[AZ_solver] = AZ_gmres; } else { printf("unknown krylov method %s\n",context->krylov); } if (blocks) ML_free(blocks); if (block_pde) ML_free(block_pde); options[AZ_scaling] = AZ_none; 
options[AZ_precond] = AZ_user_precond; options[AZ_conv] = AZ_r0; options[AZ_output] = 1; options[AZ_max_iter] = context->max_outer_its; options[AZ_poly_ord] = 5; options[AZ_kspace] = 130; params[AZ_tol] = context->tol; options[AZ_output] = context->output; ML_free(context); AZ_set_ML_preconditioner(&Pmat, Amat, ml, options); setup_time = AZ_second() - start_time; xxx = (double *) malloc( leng*sizeof(double)); for (iii = 0; iii < leng; iii++) xxx[iii] = 0.0; /* Set x */ /* there is no initguess supplied with these examples for the moment.... */ fp = fopen("initguessfile","r"); if (fp != NULL) { fclose(fp); if (proc_config[AZ_node]== 0) printf("reading initial guess from file\n"); AZ_input_msr_matrix("data_initguess.txt", update, &xxx, &garbage, N_update, proc_config); options[AZ_conv] = AZ_expected_values; } else if (proc_config[AZ_node]== 0) printf("taking 0 initial guess \n"); AZ_reorder_vec(xxx, data_org, update_index, NULL); /* if Dirichlet BC ... put the answer in */ for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) { if ( (val[i] > .99999999) && (val[i] < 1.0000001)) xxx[i] = rhs[i]; } fp = fopen("AZ_no_multilevel.dat","r"); scaling = AZ_scaling_create(); start_time = AZ_second(); if (fp != NULL) { fclose(fp); options[AZ_precond] = AZ_none; options[AZ_scaling] = AZ_sym_diag; options[AZ_ignore_scaling] = AZ_TRUE; options[AZ_keep_info] = 1; AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); /* options[AZ_pre_calc] = AZ_reuse; options[AZ_conv] = AZ_expected_values; if (proc_config[AZ_node] == 0) printf("\n-------- Second solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); if (proc_config[AZ_node] == 0) printf("\n-------- Third solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); */ } else { options[AZ_keep_info] = 1; AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); options[AZ_pre_calc] = AZ_reuse; options[AZ_conv] = AZ_expected_values; /* if (proc_config[AZ_node] == 0) printf("\n-------- Second solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); if (proc_config[AZ_node] == 0) printf("\n-------- Third solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); */ } solve_time = AZ_second() - start_time; if (proc_config[AZ_node] == 0) printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time); if (proc_config[AZ_node] == 0) printf("Printing out a few entries of the solution ...\n"); for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 7) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 23) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 47) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 101) 
{printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 171) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} ML_Aggregate_Destroy(&ag); ML_Destroy(&ml); AZ_free((void *) Amat->data_org); AZ_free((void *) Amat->val); AZ_free((void *) Amat->bindx); AZ_free((void *) update); AZ_free((void *) external); AZ_free((void *) extern_index); AZ_free((void *) update_index); AZ_scaling_destroy(&scaling); if (Amat != NULL) AZ_matrix_destroy(&Amat); if (Pmat != NULL) AZ_precond_destroy(&Pmat); free(xxx); free(rhs); #ifdef ML_MPI MPI_Finalize(); #endif return 0; }
int main(int argc, char *argv[]) { int num_PDE_eqns=5, N_levels=3; /* int nsmooth=1; */ int leng, level, N_grid_pts, coarsest_level; /* See Aztec User's Guide for more information on the */ /* variables that follow. */ int proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE]; double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE]; /* data structure for matrix corresponding to the fine grid */ int *data_org = NULL, *update = NULL, *external = NULL; int *update_index = NULL, *extern_index = NULL; int *cpntr = NULL; int *bindx = NULL, N_update, iii; double *val = NULL; double *xxx, *rhs; AZ_MATRIX *Amat; AZ_PRECOND *Pmat = NULL; ML *ml; FILE *fp; int ch,i; struct AZ_SCALING *scaling; double solve_time, setup_time, start_time; ML_Aggregate *ag; int *ivec; #ifdef VBR_VERSION ML_Operator *B, *C, *D; int *vbr_cnptr, *vbr_rnptr, *vbr_indx, *vbr_bindx, *vbr_bnptr, total_blk_rows; int total_blk_cols, blk_space, nz_space; double *vbr_val; struct ML_CSR_MSRdata *csr_data; #endif #ifdef ML_MPI MPI_Init(&argc,&argv); /* get number of processors and the name of this processor */ AZ_set_proc_config(proc_config, MPI_COMM_WORLD); #else AZ_set_proc_config(proc_config, AZ_NOT_MPI); #endif #ifdef binary fp=fopen(".data","rb"); #else fp=fopen(".data","r"); #endif if (fp==NULL) { printf("couldn't open file .data\n"); exit(1); } #ifdef binary fread(&leng, sizeof(int), 1, fp); #else fscanf(fp,"%d",&leng); #endif fclose(fp); N_grid_pts=leng/num_PDE_eqns; /* initialize the list of global indices. NOTE: the list of global */ /* indices must be in ascending order so that subsequent calls to */ /* AZ_find_index() will function properly. */ AZ_read_update(&N_update, &update, proc_config, N_grid_pts, num_PDE_eqns, AZ_linear); AZ_read_msr_matrix(update, &val, &bindx, N_update, proc_config); /* This code is to fix things up so that we are sure we have */ /* all block (including the ghost nodes the same size. 
*/ AZ_block_MSR(&bindx, &val, N_update, num_PDE_eqns, update); AZ_transform(proc_config, &external, bindx, val, update, &update_index, &extern_index, &data_org, N_update, 0, 0, 0, &cpntr, AZ_MSR_MATRIX); Amat = AZ_matrix_create( leng ); #ifndef VBR_VERSION AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL); Amat->matrix_type = data_org[AZ_matrix_type]; data_org[AZ_N_rows] = data_org[AZ_N_internal] + data_org[AZ_N_border]; #else total_blk_rows = N_update/num_PDE_eqns; total_blk_cols = total_blk_rows; blk_space = total_blk_rows*20; nz_space = blk_space*num_PDE_eqns*num_PDE_eqns; vbr_cnptr = (int *) ML_allocate(sizeof(int )*(total_blk_cols+1)); vbr_rnptr = (int *) ML_allocate(sizeof(int )*(total_blk_cols+1)); vbr_bnptr = (int *) ML_allocate(sizeof(int )*(total_blk_cols+2)); vbr_indx = (int *) ML_allocate(sizeof(int )*(blk_space+1)); vbr_bindx = (int *) ML_allocate(sizeof(int )*(blk_space+1)); vbr_val = (double *) ML_allocate(sizeof(double)*(nz_space+1)); for (i = 0; i <= total_blk_cols; i++) vbr_cnptr[i] = num_PDE_eqns; AZ_msr2vbr(vbr_val, vbr_indx, vbr_rnptr, vbr_cnptr, vbr_bnptr, vbr_bindx, bindx, val, total_blk_rows, total_blk_cols, blk_space, nz_space, -1); data_org[AZ_N_rows] = data_org[AZ_N_internal] + data_org[AZ_N_border]; data_org[AZ_N_int_blk] = data_org[AZ_N_internal]/num_PDE_eqns; data_org[AZ_N_bord_blk] = data_org[AZ_N_bord_blk]/num_PDE_eqns; data_org[AZ_N_ext_blk] = data_org[AZ_N_ext_blk]/num_PDE_eqns; data_org[AZ_matrix_type] = AZ_VBR_MATRIX; AZ_set_VBR(Amat, vbr_rnptr, vbr_cnptr, vbr_bnptr, vbr_indx, vbr_bindx, vbr_val, data_org, 0, NULL, AZ_LOCAL); Amat->matrix_type = data_org[AZ_matrix_type]; #endif start_time = AZ_second(); ML_Create(&ml, N_levels); ML_Set_PrintLevel(3); /* set up discretization matrix and matrix vector function */ AZ_ML_Set_Amat(ml, N_levels-1, N_update, N_update, Amat, proc_config); ML_Aggregate_Create( &ag ); ML_Aggregate_Set_Threshold(ag,0.0); ML_Set_SpectralNormScheme_PowerMethod(ml); /* To run SA: a) set damping factor to 1 and use power method ML_Aggregate_Set_DampingFactor(ag, 4./3.); To run NSA: a) set damping factor to 0 ML_Aggregate_Set_DampingFactor(ag, 0.); To run NSR a) set damping factor to 1 and use power method ML_Aggregate_Set_DampingFactor(ag, 1.); ag->Restriction_smoothagg_transpose = ML_FALSE; ag->keep_agg_information=1; ag->keep_P_tentative=1; b) hack code so it calls the energy minimizing restriction line 2973 of ml_agg_genP.c c) turn on the NSR flag in ml_agg_energy_min.cpp To run Emin a) set min_eneryg = 2 and keep_agg_info = 1; ag->minimizing_energy=2; ag->keep_agg_information=1; ag->cheap_minimizing_energy = 0; ag->block_scaled_SA = 1; */ ag->minimizing_energy=2; ag->keep_agg_information=1; ag->block_scaled_SA = 1; ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, num_PDE_eqns, NULL, N_update); ML_Aggregate_Set_MaxCoarseSize( ag, 20); /* ML_Aggregate_Set_RandomOrdering( ag ); ML_Aggregate_Set_DampingFactor(ag, .1); ag->drop_tol_for_smoothing = 1.0e-3; ML_Aggregate_Set_Threshold(ag, 1.0e-3); ML_Aggregate_Set_MaxCoarseSize( ag, 300); */ coarsest_level = ML_Gen_MultiLevelHierarchy_UsingAggregation(ml, N_levels-1, ML_DECREASING, ag); coarsest_level = N_levels - coarsest_level; if ( proc_config[AZ_node] == 0 ) printf("Coarse level = %d \n", coarsest_level); /* set up smoothers */ AZ_defaults(options, params); for (level = N_levels-1; level > coarsest_level; level--) { /* This is the Aztec domain decomp/ilu smoother that we */ /* usually use for this problem. 
*/ /* options[AZ_precond] = AZ_dom_decomp; options[AZ_subdomain_solve] = AZ_ilut; params[AZ_ilut_fill] = 1.0; options[AZ_reorder] = 1; ML_Gen_SmootherAztec(ml, level, options, params, proc_config, status, AZ_ONLY_PRECONDITIONER, ML_PRESMOOTHER,NULL); */ /* Sparse approximate inverse smoother that acutally does both */ /* pre and post smoothing. */ /* ML_Gen_Smoother_ParaSails(ml , level, ML_PRESMOOTHER, nsmooth, parasails_sym, parasails_thresh, parasails_nlevels, parasails_filter, parasails_loadbal, parasails_factorized); parasails_thresh /= 4.; */ /* This is the symmetric Gauss-Seidel smoothing. In parallel, */ /* it is not a true Gauss-Seidel in that each processor */ /* does a Gauss-Seidel on its local submatrix independent of the */ /* other processors. */ /* ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_PRESMOOTHER, nsmooth,1.); ML_Gen_Smoother_SymGaussSeidel(ml,level,ML_POSTSMOOTHER,nsmooth,1.); */ /* Block Gauss-Seidel with block size equal to #DOF per node. */ /* Not a true Gauss-Seidel in that each processor does a */ /* Gauss-Seidel on its local submatrix independent of the other */ /* processors. */ /* ML_Gen_Smoother_BlockGaussSeidel(ml,level,ML_PRESMOOTHER, nsmooth,0.67, num_PDE_eqns); ML_Gen_Smoother_BlockGaussSeidel(ml,level,ML_POSTSMOOTHER, nsmooth, 0.67, num_PDE_eqns); */ ML_Gen_Smoother_SymBlockGaussSeidel(ml,level,ML_POSTSMOOTHER, 1, 1.0, num_PDE_eqns); } ML_Gen_CoarseSolverSuperLU( ml, coarsest_level); ML_Gen_Solver(ml, ML_MGW, N_levels-1, coarsest_level); AZ_defaults(options, params); options[AZ_solver] = AZ_gmres; options[AZ_scaling] = AZ_none; options[AZ_precond] = AZ_user_precond; /* options[AZ_conv] = AZ_r0; */ options[AZ_output] = 1; options[AZ_max_iter] = 1500; options[AZ_poly_ord] = 5; options[AZ_kspace] = 130; params[AZ_tol] = 1.0e-8; /* options[AZ_precond] = AZ_dom_decomp; options[AZ_subdomain_solve] = AZ_ilut; params[AZ_ilut_fill] = 2.0; */ AZ_set_ML_preconditioner(&Pmat, Amat, ml, options); setup_time = AZ_second() - start_time; xxx = (double *) malloc( leng*sizeof(double)); rhs=(double *)malloc(leng*sizeof(double)); for (iii = 0; iii < leng; iii++) xxx[iii] = 0.0; /* Set rhs */ fp = fopen("AZ_capture_rhs.mat","r"); if (fp == NULL) { if (proc_config[AZ_node] == 0) printf("taking random vector for rhs\n"); AZ_random_vector(rhs, data_org, proc_config); AZ_reorder_vec(rhs, data_org, update_index, NULL); } else { fclose(fp); ivec =(int *)malloc((leng+1)*sizeof(int)); AZ_input_msr_matrix("AZ_capture_rhs.mat", update, &rhs, &ivec, N_update, proc_config); free(ivec); AZ_reorder_vec(rhs, data_org, update_index, NULL); } /* Set x */ fp = fopen("AZ_capture_init_guess.mat","r"); if (fp != NULL) { fclose(fp); ivec =(int *)malloc((leng+1)*sizeof(int)); AZ_input_msr_matrix("AZ_capture_init_guess.mat",update, &xxx, &ivec, N_update, proc_config); free(ivec); AZ_reorder_vec(xxx, data_org, update_index, NULL); } /* if Dirichlet BC ... 
put the answer in */ for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) { if ( (val[i] > .99999999) && (val[i] < 1.0000001)) xxx[i] = rhs[i]; } fp = fopen("AZ_no_multilevel.dat","r"); scaling = AZ_scaling_create(); start_time = AZ_second(); if (fp != NULL) { fclose(fp); options[AZ_precond] = AZ_none; options[AZ_scaling] = AZ_sym_diag; options[AZ_ignore_scaling] = AZ_TRUE; options[AZ_keep_info] = 1; AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); /* options[AZ_pre_calc] = AZ_reuse; options[AZ_conv] = AZ_expected_values; if (proc_config[AZ_node] == 0) printf("\n-------- Second solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); if (proc_config[AZ_node] == 0) printf("\n-------- Third solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); */ } else { options[AZ_keep_info] = 1; AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); options[AZ_pre_calc] = AZ_reuse; options[AZ_conv] = AZ_expected_values; /* if (proc_config[AZ_node] == 0) printf("\n-------- Second solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); if (proc_config[AZ_node] == 0) printf("\n-------- Third solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); */ } solve_time = AZ_second() - start_time; if (proc_config[AZ_node] == 0) printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time); ML_Aggregate_Destroy(&ag); ML_Destroy(&ml); AZ_free((void *) Amat->data_org); AZ_free((void *) Amat->val); AZ_free((void *) Amat->bindx); AZ_free((void *) update); AZ_free((void *) external); AZ_free((void *) extern_index); AZ_free((void *) update_index); AZ_scaling_destroy(&scaling); if (Amat != NULL) AZ_matrix_destroy(&Amat); if (Pmat != NULL) AZ_precond_destroy(&Pmat); free(xxx); free(rhs); #ifdef ML_MPI MPI_Finalize(); #endif return 0; }
int main(int argc, char *argv[]) { int num_PDE_eqns=3, N_levels=3, nsmooth=1; int leng, level, N_grid_pts, coarsest_level; /* See Aztec User's Guide for more information on the */ /* variables that follow. */ int proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE]; double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE]; /* data structure for matrix corresponding to the fine grid */ int *data_org = NULL, *update = NULL, *external = NULL; int *update_index = NULL, *extern_index = NULL; int *cpntr = NULL; int *bindx = NULL, N_update, iii; double *val = NULL; double *xxx, *rhs; AZ_MATRIX *Amat; AZ_PRECOND *Pmat = NULL; ML *ml; FILE *fp; int ch,i,j, Nrigid, *garbage; struct AZ_SCALING *scaling; double solve_time, setup_time, start_time, *mode, *rigid; ML_Aggregate *ag; int nblocks, *blocks; char filename[80]; double alpha; int one = 1; #ifdef ML_MPI MPI_Init(&argc,&argv); /* get number of processors and the name of this processor */ AZ_set_proc_config(proc_config, MPI_COMM_WORLD); #else AZ_set_proc_config(proc_config, AZ_NOT_MPI); #endif leng = 0; if (proc_config[AZ_node] == 0) { #ifdef binary fp=fopen(".data","rb"); #else fp=fopen(".data","r"); #endif if (fp==NULL) { printf("couldn't open file .data\n"); exit(1); } #ifdef binary fread(&leng, sizeof(int), 1, fp); #else fscanf(fp,"%d",&leng); #endif fclose(fp); } leng = AZ_gsum_int(leng, proc_config); N_grid_pts=leng/num_PDE_eqns; /* initialize the list of global indices. NOTE: the list of global */ /* indices must be in ascending order so that subsequent calls to */ /* AZ_find_index() will function properly. */ AZ_read_update(&N_update, &update, proc_config, N_grid_pts, num_PDE_eqns, AZ_linear); AZ_read_msr_matrix(update, &val, &bindx, N_update, proc_config); AZ_transform(proc_config, &external, bindx, val, update, &update_index, &extern_index, &data_org, N_update, 0, 0, 0, &cpntr, AZ_MSR_MATRIX); Amat = AZ_matrix_create( leng ); AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL); Amat->matrix_type = data_org[AZ_matrix_type]; data_org[AZ_N_rows] = data_org[AZ_N_internal] + data_org[AZ_N_border]; start_time = AZ_second(); AZ_defaults(options, params); /* scaling = AZ_scaling_create(); xxx = (double *) calloc( leng,sizeof(double)); rhs=(double *)calloc(leng,sizeof(double)); options[AZ_scaling] = AZ_sym_diag; options[AZ_precond] = AZ_none; options[AZ_max_iter] = 30; options[AZ_keep_info] = 1; AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); don't forget vector rescaling ... 
   free(xxx); free(rhs); */

   options[AZ_scaling] = AZ_none;
   ML_Create(&ml, N_levels);

   /* set up discretization matrix and matrix vector function */

   AZ_ML_Set_Amat(ml, N_levels-1, N_update, N_update, Amat, proc_config);

   ML_Aggregate_Create( &ag );

   Nrigid = 0;
   if (proc_config[AZ_node] == 0) {
      sprintf(filename,"rigid_body_mode%d",Nrigid+1);
      while( (fp = fopen(filename,"r")) != NULL) {
         fclose(fp);
         Nrigid++;
         sprintf(filename,"rigid_body_mode%d",Nrigid+1);
      }
   }
   Nrigid = AZ_gsum_int(Nrigid,proc_config);

   if (Nrigid != 0) {
      rigid = (double *) ML_allocate( sizeof(double)*Nrigid*(N_update+1) );
      if (rigid == NULL) {
         printf("Error: Not enough space for rigid body modes\n");
      }
   }

   rhs = (double *) malloc(leng*sizeof(double));
   AZ_random_vector(rhs, data_org, proc_config);

   for (i = 0; i < Nrigid; i++) {
      sprintf(filename,"rigid_body_mode%d",i+1);
      AZ_input_msr_matrix(filename, update, &mode, &garbage, N_update, proc_config);

      /* AZ_sym_rescale_sl(mode, Amat->data_org, options, proc_config, scaling); */
      /* Amat->matvec(mode, rigid, Amat, proc_config);
         for (j = 0; j < N_update; j++) printf("this is %d %e\n",j,rigid[j]); */

      for (j = 0; j < i; j++) {
         alpha = -AZ_gdot(N_update, mode, &(rigid[j*N_update]), proc_config) /
                  AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]),
                          proc_config);
         daxpy_(&N_update, &alpha, &(rigid[j*N_update]), &one, mode, &one);
         printf("alpha1 is %e\n",alpha);
      }
      alpha = -AZ_gdot(N_update, mode, rhs, proc_config) /
               AZ_gdot(N_update, mode, mode, proc_config);
      printf("alpha2 is %e\n",alpha);
      daxpy_(&N_update, &alpha, mode, &one, rhs, &one);

      for (j = 0; j < N_update; j++) rigid[i*N_update+j] = mode[j];
      free(mode);
      free(garbage);
   }

   for (j = 0; j < Nrigid; j++) {
      alpha = -AZ_gdot(N_update, rhs, &(rigid[j*N_update]), proc_config) /
               AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]),
                       proc_config);
      daxpy_(&N_update, &alpha, &(rigid[j*N_update]), &one, rhs, &one);
      printf("alpha4 is %e\n",alpha);
   }

   for (i = 0; i < Nrigid; i++) {
      alpha = -AZ_gdot(N_update, &(rigid[i*N_update]), rhs, proc_config);
      printf("alpha is %e\n",alpha);
   }

   if (Nrigid != 0) {
      ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, Nrigid, rigid, N_update);
      /* free(rigid); */
   }

   coarsest_level = ML_Gen_MGHierarchy_UsingAggregation(ml, N_levels-1,
                                                        ML_DECREASING, ag);
   coarsest_level = N_levels - coarsest_level;
   /* ML_Operator_Print(&(ml->Pmat[N_levels-2]), "Pmat"); exit(1); */
   if ( proc_config[AZ_node] == 0 )
      printf("Coarse level = %d \n", coarsest_level);

   /* set up smoothers */

   for (level = N_levels-1; level > coarsest_level; level--) {
      j = 10;
      if (level == N_levels-1) j = 10;

      options[AZ_solver]          = AZ_cg;
      options[AZ_precond]         = AZ_sym_GS;
      options[AZ_subdomain_solve] = AZ_icc;
      /* options[AZ_precond] = AZ_none; */
      options[AZ_poly_ord] = 5;
      ML_Gen_SmootherAztec(ml, level, options, params, proc_config, status,
                           j, ML_PRESMOOTHER, NULL);
      ML_Gen_SmootherAztec(ml, level, options, params, proc_config, status,
                           j, ML_POSTSMOOTHER, NULL);

      /* ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth,1.0);
         ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth,1.0); */

      /* nblocks = ML_Aggregate_Get_AggrCount( ag, level );
         ML_Aggregate_Get_AggrMap( ag, level, &blocks);
         ML_Gen_Smoother_VBlockSymGaussSeidel( ml , level, ML_BOTH, nsmooth, 1.0,
                                               nblocks, blocks);
         ML_Gen_Smoother_VBlockSymGaussSeidel( ml , level, ML_POSTSMOOTHER, nsmooth,
                                               1.0, nblocks, blocks); */

      /* ML_Gen_Smoother_VBlockJacobi( ml , level, ML_PRESMOOTHER, nsmooth, .5,
                                       nblocks, blocks);
         ML_Gen_Smoother_VBlockJacobi( ml , level, ML_POSTSMOOTHER, nsmooth, .5,
                                       nblocks, blocks); */

      /* ML_Gen_Smoother_GaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth);
         ML_Gen_Smoother_GaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth); */

      /* need to change this when num_pdes is different on different levels */
      /* if (level == N_levels-1) {
            ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth,
                                             0.5, num_PDE_eqns);
            ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth,
                                             0.5, num_PDE_eqns);
         }
         else {
            ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_PRESMOOTHER, nsmooth,
                                             0.5, 2*num_PDE_eqns);
            ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_POSTSMOOTHER, nsmooth,
                                             0.5, 2*num_PDE_eqns);
         } */

      /* ML_Gen_SmootherJacobi(ml , level, ML_PRESMOOTHER, nsmooth, .67);
         ML_Gen_SmootherJacobi(ml , level, ML_POSTSMOOTHER, nsmooth, .67 ); */
   }

   /* ML_Gen_CoarseSolverSuperLU( ml, coarsest_level); */
   /* ML_Gen_SmootherSymGaussSeidel(ml , coarsest_level, ML_PRESMOOTHER, 2*nsmooth,1.); */
   /* ML_Gen_SmootherBlockGaussSeidel(ml , level, ML_PRESMOOTHER, 50*nsmooth, 1.0,
                                      2*num_PDE_eqns); */
   ML_Gen_Smoother_BlockGaussSeidel(ml , level, ML_PRESMOOTHER, 2*nsmooth, 1.0,
                                    num_PDE_eqns);

   ML_Gen_Solver(ml, ML_MGV, N_levels-1, coarsest_level);
   AZ_defaults(options, params);

   options[AZ_solver]   = AZ_GMRESR;
   options[AZ_scaling]  = AZ_none;
   options[AZ_precond]  = AZ_user_precond;
   options[AZ_conv]     = AZ_rhs;
   options[AZ_output]   = 1;
   options[AZ_max_iter] = 1500;
   options[AZ_poly_ord] = 5;
   options[AZ_kspace]   = 130;
   params[AZ_tol]       = 1.0e-8;

   AZ_set_ML_preconditioner(&Pmat, Amat, ml, options);
   setup_time = AZ_second() - start_time;

   xxx = (double *) malloc( leng*sizeof(double));

   /* Set rhs */

   fp = fopen("AZ_capture_rhs.dat","r");
   if (fp == NULL) {
      if (proc_config[AZ_node] == 0) printf("taking random vector for rhs\n");
      /* AZ_random_vector(rhs, data_org, proc_config);
         AZ_reorder_vec(rhs, data_org, update_index, NULL);
         AZ_random_vector(xxx, data_org, proc_config);
         AZ_reorder_vec(xxx, data_org, update_index, NULL);
         Amat->matvec(xxx, rhs, Amat, proc_config); */
   }
   else {
      ch = getc(fp);
      if (ch == 'S') {
         while ( (ch = getc(fp)) != '\n') ;
      }
      else ungetc(ch,fp);
      for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++)
         fscanf(fp,"%lf",&(rhs[i]));
      fclose(fp);
   }
   for (iii = 0; iii < leng; iii++) xxx[iii] = 0.0;

   /* Set x */

   fp = fopen("AZ_capture_init_guess.dat","r");
   if (fp != NULL) {
      ch = getc(fp);
      if (ch == 'S') {
         while ( (ch = getc(fp)) != '\n') ;
      }
      else ungetc(ch,fp);
      for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++)
         fscanf(fp,"%lf",&(xxx[i]));
      fclose(fp);
      options[AZ_conv] = AZ_expected_values;
   }

   /* if Dirichlet BC ... put the answer in */

   for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) {
      if ( (val[i] > .99999999) && (val[i] < 1.0000001)) xxx[i] = rhs[i];
   }

   fp = fopen("AZ_no_multilevel.dat","r");
   scaling = AZ_scaling_create();
   start_time = AZ_second();
   if (fp != NULL) {
      fclose(fp);
      options[AZ_precond]        = AZ_none;
      options[AZ_scaling]        = AZ_sym_diag;
      options[AZ_ignore_scaling] = AZ_TRUE;
      options[AZ_keep_info]      = 1;
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling);
      /* options[AZ_pre_calc] = AZ_reuse;
         options[AZ_conv] = AZ_expected_values;
         if (proc_config[AZ_node] == 0)
            printf("\n-------- Second solve with improved convergence test -----\n");
         AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling);
         if (proc_config[AZ_node] == 0)
            printf("\n-------- Third solve with improved convergence test -----\n");
         AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); */
   }
   else {
      options[AZ_keep_info] = 1;
      /* options[AZ_max_iter] = 40; */
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);
      for (j = 0; j < Nrigid; j++) {
         alpha = -AZ_gdot(N_update, xxx, &(rigid[j*N_update]), proc_config) /
                  AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]),
                          proc_config);
         daxpy_(&N_update, &alpha, &(rigid[j*N_update]), &one, xxx, &one);
         printf("alpha5 is %e\n",alpha);
      }
      AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);
      options[AZ_pre_calc] = AZ_reuse;
      options[AZ_conv]     = AZ_expected_values;
      /* if (proc_config[AZ_node] == 0)
            printf("\n-------- Second solve with improved convergence test -----\n");
         AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling);
         if (proc_config[AZ_node] == 0)
            printf("\n-------- Third solve with improved convergence test -----\n");
         AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); */
   }
   solve_time = AZ_second() - start_time;

   if (proc_config[AZ_node] == 0)
      printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time);

   ML_Aggregate_Destroy(&ag);
   ML_Destroy(&ml);

   AZ_free((void *) Amat->data_org);
   AZ_free((void *) Amat->val);
   AZ_free((void *) Amat->bindx);
   AZ_free((void *) update);
   AZ_free((void *) external);
   AZ_free((void *) extern_index);
   AZ_free((void *) update_index);
   if (Amat != NULL) AZ_matrix_destroy(&Amat);
   if (Pmat != NULL) AZ_precond_destroy(&Pmat);
   free(xxx);
   free(rhs);

#ifdef ML_MPI
   MPI_Finalize();
#endif
   return 0;
}
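
/*
 * The rigid-body-mode loops in the driver above repeatedly apply one
 * projection step, alpha = -<x, r_j> / <r_j, r_j> followed by
 * x <- x + alpha * r_j, implemented there with the parallel dot product
 * AZ_gdot() and the BLAS routine daxpy_().  The small serial sketch below
 * shows the same classical Gram-Schmidt step on plain arrays.  It is
 * illustrative only and is not part of the driver: the function name
 * gram_schmidt_step and the use of plain C loops instead of AZ_gdot/daxpy_
 * are assumptions made for the sake of a self-contained example.
 */
#include <stdio.h>

/* Remove the component of x that lies along r (vectors of length n). */
static void gram_schmidt_step(int n, double *x, const double *r)
{
   int i;
   double xr = 0.0, rr = 0.0, alpha;

   for (i = 0; i < n; i++) { xr += x[i]*r[i]; rr += r[i]*r[i]; }
   if (rr == 0.0) return;          /* nothing to project against */
   alpha = -xr / rr;               /* same alpha as printed by the driver */
   for (i = 0; i < n; i++) x[i] += alpha*r[i];
}

#ifdef GRAM_SCHMIDT_STEP_DEMO
int main(void)
{
   double x[3] = {1.0, 2.0, 3.0};
   double r[3] = {1.0, 0.0, 0.0};  /* stand-in for one rigid body mode */

   gram_schmidt_step(3, x, r);
   printf("x = %e %e %e\n", x[0], x[1], x[2]);  /* first entry becomes 0 */
   return 0;
}
#endif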
int main(int argc, char *argv[]) { int num_PDE_eqns=6, N_levels=4, nsmooth=2; int leng, level, N_grid_pts, coarsest_level; /* See Aztec User's Guide for more information on the */ /* variables that follow. */ int proc_config[AZ_PROC_SIZE], options[AZ_OPTIONS_SIZE]; double params[AZ_PARAMS_SIZE], status[AZ_STATUS_SIZE]; /* data structure for matrix corresponding to the fine grid */ double *val = NULL, *xxx, *rhs, solve_time, setup_time, start_time; AZ_MATRIX *Amat; AZ_PRECOND *Pmat = NULL; ML *ml; FILE *fp; int i, j, Nrigid, *garbage = NULL; #ifdef ML_partition int nblocks; int *block_list = NULL; int k; #endif struct AZ_SCALING *scaling; ML_Aggregate *ag; double *mode, *rigid; char filename[80]; double alpha; int allocated = 0; int old_prec, old_sol; double old_tol; /* double *Amode, beta, biggest; int big_ind = -1, ii; */ ML_Operator *Amatrix; int *rowi_col = NULL, rowi_N, count2, ccc; double *rowi_val = NULL; double max_diag, min_diag, max_sum, sum; int nBlocks, *blockIndices, Ndof; #ifdef ML_partition FILE *fp2; int count; if (argc != 2) { printf("Usage: ml_read_elas num_processors\n"); exit(1); } else sscanf(argv[1],"%d",&nblocks); #endif #ifdef HAVE_MPI MPI_Init(&argc,&argv); /* get number of processors and the name of this processor */ AZ_set_proc_config(proc_config, MPI_COMM_WORLD); #else AZ_set_proc_config(proc_config, AZ_NOT_MPI); #endif /* read in the number of matrix equations */ leng = 0; if (proc_config[AZ_node] == 0) { # ifdef binary fp=fopen(".data","rb"); # else fp=fopen(".data","r"); # endif if (fp==NULL) { printf("couldn't open file .data\n"); exit(1); } # ifdef binary fread(&leng, sizeof(int), 1, fp); # else fscanf(fp,"%d",&leng); # endif fclose(fp); } leng = AZ_gsum_int(leng, proc_config); N_grid_pts=leng/num_PDE_eqns; /* initialize the list of global indices. NOTE: the list of global */ /* indices must be in ascending order so that subsequent calls to */ /* AZ_find_index() will function properly. */ if (proc_config[AZ_N_procs] == 1) i = AZ_linear; else i = AZ_file; AZ_read_update(&N_update, &update, proc_config, N_grid_pts, num_PDE_eqns,i); AZ_read_msr_matrix(update, &val, &bindx, N_update, proc_config); /* This code is to fix things up so that we are sure we have */ /* all block (including the ghost nodes the same size. */ AZ_block_MSR(&bindx, &val, N_update, num_PDE_eqns, update); AZ_transform_norowreordering(proc_config, &external, bindx, val, update, &update_index, &extern_index, &data_org, N_update, 0, 0, 0, &cpntr, AZ_MSR_MATRIX); Amat = AZ_matrix_create( leng ); AZ_set_MSR(Amat, bindx, val, data_org, 0, NULL, AZ_LOCAL); Amat->matrix_type = data_org[AZ_matrix_type]; data_org[AZ_N_rows] = data_org[AZ_N_internal] + data_org[AZ_N_border]; #ifdef SCALE_ME ML_MSR_sym_diagonal_scaling(Amat, proc_config, &scaling_vect); #endif start_time = AZ_second(); options[AZ_scaling] = AZ_none; ML_Create(&ml, N_levels); ML_Set_PrintLevel(10); /* set up discretization matrix and matrix vector function */ AZ_ML_Set_Amat(ml, N_levels-1, N_update, N_update, Amat, proc_config); #ifdef ML_partition /* this code is meant to partition the matrices so that things can be */ /* run in parallel later. */ /* It is meant to be run on only one processor. 
*/ #ifdef MB_MODIF fp2 = fopen(".update","w"); #else fp2 = fopen("partition_file","w"); #endif ML_Operator_AmalgamateAndDropWeak(&(ml->Amat[N_levels-1]), num_PDE_eqns, 0.0); ML_Gen_Blocks_Metis(ml, N_levels-1, &nblocks, &block_list); for (i = 0; i < nblocks; i++) { count = 0; for (j = 0; j < ml->Amat[N_levels-1].outvec_leng; j++) { if (block_list[j] == i) count++; } fprintf(fp2," %d\n",count*num_PDE_eqns); for (j = 0; j < ml->Amat[N_levels-1].outvec_leng; j++) { if (block_list[j] == i) { for (k = 0; k < num_PDE_eqns; k++) fprintf(fp2,"%d\n",j*num_PDE_eqns+k); } } } fclose(fp2); ML_Operator_UnAmalgamateAndDropWeak(&(ml->Amat[N_levels-1]),num_PDE_eqns,0.0); #ifdef MB_MODIF printf(" partition file dumped in .update\n"); #endif exit(1); #endif ML_Aggregate_Create( &ag ); /* ML_Aggregate_Set_CoarsenScheme_MIS(ag); */ #ifdef MB_MODIF ML_Aggregate_Set_DampingFactor(ag,1.50); #else ML_Aggregate_Set_DampingFactor(ag,1.5); #endif ML_Aggregate_Set_CoarsenScheme_METIS(ag); ML_Aggregate_Set_NodesPerAggr( ml, ag, -1, 35); /* ML_Aggregate_Set_Phase3AggregateCreationAggressiveness(ag, 10.001); */ ML_Aggregate_Set_Threshold(ag, 0.0); ML_Aggregate_Set_MaxCoarseSize( ag, 300); /* read in the rigid body modes */ Nrigid = 0; /* to ensure compatibility with RBM dumping software */ if (proc_config[AZ_node] == 0) { sprintf(filename,"rigid_body_mode%02d",Nrigid+1); while( (fp = fopen(filename,"r")) != NULL) { which_filename = 1; fclose(fp); Nrigid++; sprintf(filename,"rigid_body_mode%02d",Nrigid+1); } sprintf(filename,"rigid_body_mode%d",Nrigid+1); while( (fp = fopen(filename,"r")) != NULL) { fclose(fp); Nrigid++; sprintf(filename,"rigid_body_mode%d",Nrigid+1); } } Nrigid = AZ_gsum_int(Nrigid,proc_config); if (Nrigid != 0) { rigid = (double *) ML_allocate( sizeof(double)*Nrigid*(N_update+1) ); if (rigid == NULL) { printf("Error: Not enough space for rigid body modes\n"); } } rhs = (double *) malloc(leng*sizeof(double)); xxx = (double *) malloc(leng*sizeof(double)); for (iii = 0; iii < leng; iii++) xxx[iii] = 0.0; for (i = 0; i < Nrigid; i++) { if (which_filename == 1) sprintf(filename,"rigid_body_mode%02d",i+1); else sprintf(filename,"rigid_body_mode%d",i+1); AZ_input_msr_matrix(filename,update,&mode,&garbage,N_update,proc_config); AZ_reorder_vec(mode, data_org, update_index, NULL); /* here is something to stick a rigid body mode as the initial */ /* The idea is to solve A x = 0 without smoothing with a two */ /* level method. If everything is done properly, we should */ /* converge in 2 iterations. */ /* Note: we must also zero out components of the rigid body */ /* mode that correspond to Dirichlet bcs. */ if (i == -4) { for (iii = 0; iii < leng; iii++) xxx[iii] = mode[iii]; ccc = 0; Amatrix = &(ml->Amat[N_levels-1]); for (iii = 0; iii < Amatrix->outvec_leng; iii++) { ML_get_matrix_row(Amatrix,1,&iii,&allocated,&rowi_col,&rowi_val, &rowi_N, 0); count2 = 0; for (j = 0; j < rowi_N; j++) if (rowi_val[j] != 0.) count2++; if (count2 <= 1) { xxx[iii] = 0.; ccc++; } } free(rowi_col); free(rowi_val); allocated = 0; rowi_col = NULL; rowi_val = NULL; } /* * Rescale matrix/rigid body modes and checking * AZ_sym_rescale_sl(mode, Amat->data_org, options, proc_config, scaling); Amat->matvec(mode, rigid, Amat, proc_config); for (j = 0; j < N_update; j++) printf("this is %d %e\n",j,rigid[j]); */ /* Here is some code to check that the rigid body modes are */ /* really rigid body modes. The idea is to multiply by A and */ /* then to zero out things that we "think" are boundaries. 
*/ /* In this hardwired example, things near boundaries */ /* correspond to matrix rows that do not have 81 nonzeros. */ /* Amode = (double *) malloc(leng*sizeof(double)); Amat->matvec(mode, Amode, Amat, proc_config); j = 0; biggest = 0.0; for (ii = 0; ii < N_update; ii++) { if ( Amat->bindx[ii+1] - Amat->bindx[ii] != 80) { Amode[ii] = 0.; j++; } else { if ( fabs(Amode[ii]) > biggest) { biggest=fabs(Amode[ii]); big_ind = ii; } } } printf("%d entries zeroed out of %d elements\n",j,N_update); alpha = AZ_gdot(N_update, Amode, Amode, proc_config); beta = AZ_gdot(N_update, mode, mode, proc_config); printf("||A r||^2 =%e, ||r||^2 = %e, ratio = %e\n", alpha,beta,alpha/beta); printf("the biggest is %e at row %d\n",biggest,big_ind); free(Amode); */ /* orthogonalize mode with respect to previous modes. */ for (j = 0; j < i; j++) { alpha = -AZ_gdot(N_update, mode, &(rigid[j*N_update]), proc_config)/ AZ_gdot(N_update, &(rigid[j*N_update]), &(rigid[j*N_update]), proc_config); /* daxpy_(&N_update,&alpha,&(rigid[j*N_update]), &one, mode, &one); */ } #ifndef MB_MODIF printf(" after mb %e %e %e\n",mode[0],mode[1],mode[2]); #endif for (j = 0; j < N_update; j++) rigid[i*N_update+j] = mode[j]; free(mode); free(garbage); garbage = NULL; } if (Nrigid != 0) { ML_Aggregate_Set_BlockDiagScaling(ag); ML_Aggregate_Set_NullSpace(ag, num_PDE_eqns, Nrigid, rigid, N_update); free(rigid); } #ifdef SCALE_ME ML_Aggregate_Scale_NullSpace(ag, scaling_vect, N_update); #endif coarsest_level = ML_Gen_MGHierarchy_UsingAggregation(ml, N_levels-1, ML_DECREASING, ag); AZ_defaults(options, params); coarsest_level = N_levels - coarsest_level; if ( proc_config[AZ_node] == 0 ) printf("Coarse level = %d \n", coarsest_level); /* set up smoothers */ for (level = N_levels-1; level > coarsest_level; level--) { /* ML_Gen_Smoother_BlockGaussSeidel(ml, level,ML_BOTH, 1, 1., num_PDE_eqns); */ /* Sparse approximate inverse smoother that acutally does both */ /* pre and post smoothing. */ /* ML_Gen_Smoother_ParaSails(ml , level, ML_PRESMOOTHER, nsmooth, parasails_sym, parasails_thresh, parasails_nlevels, parasails_filter, parasails_loadbal, parasails_factorized); */ /* This is the symmetric Gauss-Seidel smoothing that we usually use. */ /* In parallel, it is not a true Gauss-Seidel in that each processor */ /* does a Gauss-Seidel on its local submatrix independent of the */ /* other processors. */ /* ML_Gen_Smoother_Cheby(ml, level, ML_BOTH, 30., nsmooth); */ Ndof = ml->Amat[level].invec_leng; ML_Gen_Blocks_Aggregates(ag, level, &nBlocks, &blockIndices); ML_Gen_Smoother_BlockDiagScaledCheby(ml, level, ML_BOTH, 30.,nsmooth, nBlocks, blockIndices); /* ML_Gen_Smoother_SymGaussSeidel(ml , level, ML_BOTH, nsmooth,1.); */ /* This is a true Gauss Seidel in parallel. This seems to work for */ /* elasticity problems. However, I don't believe that this is very */ /* efficient in parallel. 
*/ /* nblocks = ml->Amat[level].invec_leng/num_PDE_eqns; blocks = (int *) ML_allocate(sizeof(int)*N_update); for (i =0; i < ml->Amat[level].invec_leng; i++) blocks[i] = i/num_PDE_eqns; ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml , level, ML_PRESMOOTHER, nsmooth, 1., nblocks, blocks); ML_Gen_Smoother_VBlockSymGaussSeidelSequential(ml, level, ML_POSTSMOOTHER, nsmooth, 1., nblocks, blocks); free(blocks); */ /* Block Jacobi Smoothing */ /* nblocks = ml->Amat[level].invec_leng/num_PDE_eqns; blocks = (int *) ML_allocate(sizeof(int)*N_update); for (i =0; i < ml->Amat[level].invec_leng; i++) blocks[i] = i/num_PDE_eqns; ML_Gen_Smoother_VBlockJacobi(ml , level, ML_BOTH, nsmooth, ML_ONE_STEP_CG, nblocks, blocks); free(blocks); */ /* Jacobi Smoothing */ /* ML_Gen_Smoother_Jacobi(ml , level, ML_PRESMOOTHER, nsmooth, ML_ONE_STEP_CG); ML_Gen_Smoother_Jacobi(ml , level, ML_POSTSMOOTHER, nsmooth,ML_ONE_STEP_CG); */ /* This does a block Gauss-Seidel (not true GS in parallel) */ /* where each processor has 'nblocks' blocks. */ /* nblocks = 250; ML_Gen_Blocks_Metis(ml, level, &nblocks, &blocks); ML_Gen_Smoother_VBlockJacobi(ml , level, ML_BOTH, nsmooth,ML_ONE_STEP_CG, nblocks, blocks); free(blocks); */ num_PDE_eqns = 6; } /* Choose coarse grid solver: mls, superlu, symGS, or Aztec */ /* ML_Gen_Smoother_Cheby(ml, coarsest_level, ML_BOTH, 30., nsmooth); ML_Gen_CoarseSolverSuperLU( ml, coarsest_level); */ /* ML_Gen_Smoother_SymGaussSeidel(ml , coarsest_level, ML_BOTH, nsmooth,1.); */ old_prec = options[AZ_precond]; old_sol = options[AZ_solver]; old_tol = params[AZ_tol]; params[AZ_tol] = 1.0e-9; params[AZ_tol] = 1.0e-5; options[AZ_precond] = AZ_Jacobi; options[AZ_solver] = AZ_cg; options[AZ_poly_ord] = 1; options[AZ_conv] = AZ_r0; options[AZ_orth_kvecs] = AZ_TRUE; j = AZ_gsum_int(ml->Amat[coarsest_level].outvec_leng, proc_config); options[AZ_keep_kvecs] = j - 6; options[AZ_max_iter] = options[AZ_keep_kvecs]; ML_Gen_SmootherAztec(ml, coarsest_level, options, params, proc_config, status, options[AZ_keep_kvecs], ML_PRESMOOTHER, NULL); options[AZ_conv] = AZ_noscaled; options[AZ_keep_kvecs] = 0; options[AZ_orth_kvecs] = 0; options[AZ_precond] = old_prec; options[AZ_solver] = old_sol; params[AZ_tol] = old_tol; /* */ #ifdef RST_MODIF ML_Gen_Solver(ml, ML_MGV, N_levels-1, coarsest_level); #else #ifdef MB_MODIF ML_Gen_Solver(ml, ML_SAAMG, N_levels-1, coarsest_level); #else ML_Gen_Solver(ml, ML_MGFULLV, N_levels-1, coarsest_level); #endif #endif options[AZ_solver] = AZ_GMRESR; options[AZ_solver] = AZ_cg; options[AZ_scaling] = AZ_none; options[AZ_precond] = AZ_user_precond; options[AZ_conv] = AZ_r0; options[AZ_conv] = AZ_noscaled; options[AZ_output] = 1; options[AZ_max_iter] = 500; options[AZ_poly_ord] = 5; options[AZ_kspace] = 40; params[AZ_tol] = 4.8e-6; AZ_set_ML_preconditioner(&Pmat, Amat, ml, options); setup_time = AZ_second() - start_time; /* Set rhs */ fp = fopen("AZ_capture_rhs.dat","r"); if (fp == NULL) { AZ_random_vector(rhs, data_org, proc_config); if (proc_config[AZ_node] == 0) printf("taking random vector for rhs\n"); for (i = 0; i < -N_update; i++) { rhs[i] = (double) update[i]; rhs[i] = 7.; } } else { if (proc_config[AZ_node]== 0) printf("reading rhs guess from file\n"); AZ_input_msr_matrix("AZ_capture_rhs.dat", update, &rhs, &garbage, N_update, proc_config); free(garbage); } AZ_reorder_vec(rhs, data_org, update_index, NULL); printf("changing rhs by multiplying with A\n"); Amat->matvec(rhs, xxx, Amat, proc_config); for (i = 0; i < N_update; i++) rhs[i] = xxx[i]; fp = 
fopen("AZ_capture_init_guess.dat","r"); if (fp != NULL) { fclose(fp); if (proc_config[AZ_node]== 0) printf("reading initial guess from file\n"); AZ_input_msr_matrix("AZ_capture_init_guess.dat", update, &xxx, &garbage, N_update, proc_config); free(garbage); xxx = (double *) realloc(xxx, sizeof(double)*( Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border] + Amat->data_org[AZ_N_external])); } AZ_reorder_vec(xxx, data_org, update_index, NULL); /* if Dirichlet BC ... put the answer in */ /* for (i = 0; i < data_org[AZ_N_internal]+data_org[AZ_N_border]; i++) { if ( (val[i] > .99999999) && (val[i] < 1.0000001)) xxx[i] = rhs[i]; } */ fp = fopen("AZ_no_multilevel.dat","r"); scaling = AZ_scaling_create(); start_time = AZ_second(); if (fp != NULL) { fclose(fp); options[AZ_precond] = AZ_none; options[AZ_scaling] = AZ_sym_diag; options[AZ_ignore_scaling] = AZ_TRUE; options[AZ_keep_info] = 1; AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); /* options[AZ_pre_calc] = AZ_reuse; options[AZ_conv] = AZ_expected_values; if (proc_config[AZ_node] == 0) printf("\n-------- Second solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); if (proc_config[AZ_node] == 0) printf("\n-------- Third solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, NULL, scaling); */ } else { options[AZ_keep_info] = 1; options[AZ_conv] = AZ_noscaled; options[AZ_conv] = AZ_r0; params[AZ_tol] = 1.0e-7; /* ML_Iterate(ml, xxx, rhs); */ alpha = sqrt(AZ_gdot(N_update, xxx, xxx, proc_config)); printf("init guess = %e\n",alpha); alpha = sqrt(AZ_gdot(N_update, rhs, rhs, proc_config)); printf("rhs = %e\n",alpha); #ifdef SCALE_ME ML_MSR_scalerhs(rhs, scaling_vect, data_org[AZ_N_internal] + data_org[AZ_N_border]); ML_MSR_scalesol(xxx, scaling_vect, data_org[AZ_N_internal] + data_org[AZ_N_border]); #endif max_diag = 0.; min_diag = 1.e30; max_sum = 0.; for (i = 0; i < N_update; i++) { if (Amat->val[i] < 0.) 
printf("woops negative diagonal A(%d,%d) = %e\n", i,i,Amat->val[i]); if (Amat->val[i] > max_diag) max_diag = Amat->val[i]; if (Amat->val[i] < min_diag) min_diag = Amat->val[i]; sum = fabs(Amat->val[i]); for (j = Amat->bindx[i]; j < Amat->bindx[i+1]; j++) { sum += fabs(Amat->val[j]); } if (sum > max_sum) max_sum = sum; } printf("Largest diagonal = %e, min diag = %e large abs row sum = %e\n", max_diag, min_diag, max_sum); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); options[AZ_pre_calc] = AZ_reuse; options[AZ_conv] = AZ_expected_values; /* if (proc_config[AZ_node] == 0) printf("\n-------- Second solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); if (proc_config[AZ_node] == 0) printf("\n-------- Third solve with improved convergence test -----\n"); AZ_iterate(xxx, rhs, options, params, status, proc_config, Amat, Pmat, scaling); */ } solve_time = AZ_second() - start_time; if (proc_config[AZ_node] == 0) printf("Solve time = %e, MG Setup time = %e\n", solve_time, setup_time); if (proc_config[AZ_node] == 0) printf("Printing out a few entries of the solution ...\n"); for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 7) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 23) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 47) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 101) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} j = AZ_gsum_int(7, proc_config); /* sync processors */ for (j=0;j<Amat->data_org[AZ_N_internal]+ Amat->data_org[AZ_N_border];j++) if (update[j] == 171) {printf("solution(gid = %d) = %10.4e\n", update[j],xxx[update_index[j]]); fflush(stdout);} ML_Aggregate_Destroy(&ag); ML_Destroy(&ml); AZ_free((void *) Amat->data_org); AZ_free((void *) Amat->val); AZ_free((void *) Amat->bindx); AZ_free((void *) update); AZ_free((void *) external); AZ_free((void *) extern_index); AZ_free((void *) update_index); AZ_scaling_destroy(&scaling); if (Amat != NULL) AZ_matrix_destroy(&Amat); if (Pmat != NULL) AZ_precond_destroy(&Pmat); free(xxx); free(rhs); #ifdef HAVE_MPI MPI_Finalize(); #endif return 0; }
/*----------------------------------------------------------------------*
 |  Destructor (public)                                      m.gee 01/05 |
 *----------------------------------------------------------------------*/
ML_NOX::ML_Nox_NonlinearLevel::~ML_Nox_NonlinearLevel()
{
   // destroying this level's operator depends on ismatrixfree_ and on the level
   if (ismatrixfree_==false)
   {
      if (level_ == 0)
      {
         SmootherA_ = 0;
      }
      else if (SmootherA_)
      {
         delete SmootherA_;
         SmootherA_ = 0;
      }
      else
      {
         cout << "**ERR**: ML_NOX::ML_Nox_NonlinearLevel::~ML_Nox_NonlinearLevel:\n"
              << "**ERR**: something weird happened while destroying SmootherA_ on level "
              << level_ << "\n"
              << "**ERR**: file/line: " << __FILE__ << "/" << __LINE__ << "\n";
         throw -1;
      }
   }
   else // ismatrixfree_==true
   {
      // this is just a ptr to the finite difference constructed matrix;
      // the matrixfree level is in charge of destroying it
      SmootherA_ = 0;
   }

   // in the matrixfree case, the coarseinterface is owned by the matrixfree
   // class of this level and is destroyed there
   if (ismatrixfree_==false)
   {
      if (coarseinterface_) delete coarseinterface_;
      coarseinterface_ = 0;
   }
   else
      coarseinterface_ = 0;

   if (coarseprepost_) delete coarseprepost_;
   coarseprepost_ = 0;

   if (thislevel_ag_) ML_Aggregate_Destroy(&thislevel_ag_);
   thislevel_ag_ = 0;

   if (thislevel_ml_) ML_Destroy(&thislevel_ml_);
   thislevel_ml_ = 0;

   if (thislevel_prec_)   delete thislevel_prec_;   thislevel_prec_   = 0;
   if (xthis_)            delete xthis_;            xthis_            = 0;
   if (thislevel_A_)      delete thislevel_A_;      thislevel_A_      = 0;
   if (thislevel_linSys_) delete thislevel_linSys_; thislevel_linSys_ = 0;
   if (azlinSys_)         delete azlinSys_;         azlinSys_         = 0;
   if (clone_)            delete clone_;            clone_            = 0;
   if (nlParams_)         delete nlParams_;         nlParams_         = 0;
   if (absresid_)         delete absresid_;         absresid_         = 0;
   if (nupdate_)          delete nupdate_;          nupdate_          = 0;
   if (fv_)               delete fv_;               fv_               = 0;
   if (maxiters_)         delete maxiters_;         maxiters_         = 0;
   if (combo1_)           delete combo1_;           combo1_           = 0;
   if (combo2_)           delete combo2_;           combo2_           = 0;
   if (group_)            delete group_;            group_            = 0;
   if (initialGuess_)     delete initialGuess_;     initialGuess_     = 0;
   if (solver_)           delete solver_;           solver_           = 0;
   if (Broyd_)            delete Broyd_;            Broyd_            = 0;

   return;
}
int main(int argc, char *argv[])
{
   ML *ml_object;
   int i, N_grids = 3, N_levels;
   double sol[129], rhs[129];
   ML_Aggregate *agg_object;
   ML_Operator *data;
   ML_Krylov *kdata;

#ifdef ML_MPI
   MPI_Init(&argc,&argv);
#endif

   for (i = 0; i < 129; i++) sol[i] = 0.;
   for (i = 0; i < 129; i++) rhs[i] = 2.;

   ML_Create(&ml_object, N_grids);
   ML_Init_Amatrix(ml_object, 0, 129, 129, NULL);
   ML_Set_Amatrix_Getrow(ml_object, 0, Poisson_getrow, NULL, 129);
   ML_Set_Amatrix_Matvec(ml_object, 0, Poisson_matvec);
   ML_Set_PrintLevel(10);

   ML_Aggregate_Create(&agg_object);
   ML_Aggregate_Set_MaxCoarseSize(agg_object,1);
   N_levels = ML_Gen_MGHierarchy_UsingAggregation(ml_object, 0,
                                                  ML_INCREASING, agg_object);

   /******** Begin code to set a Jacobi smoother ******

   ML_Gen_Smoother_Jacobi(ml_object, ML_ALL_LEVELS, ML_PRESMOOTHER, 1, ML_DEFAULT);

    ******** End code to set a Jacobi smoother ******/

   /******** Begin code to set a user-defined smoother ******/

   ML_Get_Amatrix(ml_object, 0, &data);
   ML_Set_Smoother(ml_object, 0, ML_BOTH, data, user_smoothing,"mine");
   ML_Get_Amatrix(ml_object, 1, &data);
   ML_Set_Smoother(ml_object, 1, ML_BOTH, data, user_smoothing,"mine");
   ML_Get_Amatrix(ml_object, 2, &data);
   ML_Set_Smoother(ml_object, 2, ML_BOTH, data, user_smoothing,"mine");

   /******** End code to set a user-defined smoother ******/

   ML_Gen_Solver(ml_object, ML_MGV, 0, N_levels-1);

   /* This example uses an internal CG solver within ML.           */
   /* ML has limited Krylov methods support. It is intended        */
   /* that ML be used with another package that supplies           */
   /* more sophisticated Krylov solver options (such as those      */
   /* found in the Trilinos or Aztec packages).                    */

   kdata = ML_Krylov_Create(ml_object->comm);
   ML_Krylov_Set_PrintFreq( kdata, 1 );
   ML_Krylov_Set_Method(kdata, ML_CG);
   ML_Krylov_Set_Amatrix(kdata, &(ml_object->Amat[0]));
   ML_Krylov_Set_PreconFunc(kdata, ML_MGVSolve_Wrapper);
   ML_Krylov_Set_Precon(kdata, ml_object);
   ML_Krylov_Set_Tolerance(kdata, 1.e-7);
   ML_Krylov_Solve(kdata, 129, rhs, sol);
   ML_Krylov_Destroy( &kdata );

   ML_Aggregate_Destroy(&agg_object);
   ML_Destroy(&ml_object);

   printf("answer is %e %e %e %e %e\n",sol[0],sol[1],sol[2],sol[3],sol[4]);

#ifdef ML_MPI
   MPI_Finalize();
#endif
   exit(EXIT_SUCCESS);
}
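
/*
 * The example above registers Poisson_getrow, Poisson_matvec and
 * user_smoothing, but their definitions are not shown in this excerpt.
 * The sketch below follows the 1-D Poisson example from the ML User's Guide:
 * a 129-point tridiagonal operator and a damped-Jacobi user smoother built
 * from ML_Operator_Apply() and ML_Operator_Get_Diag().  The callback
 * signatures used here are assumptions (they match the guide's example, but
 * the installed ML headers should be checked before relying on them).
 */

/* Supply one or more matrix rows: 3-point stencil [-1 2 -1] on 129 points. */
int Poisson_getrow(ML_Operator *A_data, int N_requested_rows,
                   int requested_rows[], int allocated_space,
                   int columns[], double values[], int row_lengths[])
{
   int count = 0, start, row, i;

   for (i = 0; i < N_requested_rows; i++) {
      if (allocated_space < count+3) return(0);  /* ask ML to enlarge buffers */
      start = count;
      row   = requested_rows[i];
      columns[count] = row;   values[count++] = 2.;
      if (row !=   0) { columns[count] = row-1; values[count++] = -1.; }
      if (row != 128) { columns[count] = row+1; values[count++] = -1.; }
      row_lengths[i] = count - start;
   }
   return(1);
}

/* Matrix-vector product for the same tridiagonal operator. */
int Poisson_matvec(ML_Operator *A_data, int in_length, double p[],
                   int out_length, double ap[])
{
   int i;

   for (i = 0; i < 129; i++) {
      ap[i] = 2.*p[i];
      if (i !=   0) ap[i] -= p[i-1];
      if (i != 128) ap[i] -= p[i+1];
   }
   return 0;
}

/* One sweep of damped Jacobi: x <- x + omega * D^{-1} (rhs - A x). */
int user_smoothing(ML_Smoother *data, int x_length, double x[],
                   int rhs_length, double rhs[])
{
   int i;
   double ap[129], omega = .5;   /* work vector and damping factor */
   double *diag;
   ML_Operator *Amat;

   Amat = (ML_Operator *) ML_Get_MySmootherData(data);
   ML_Operator_Apply(Amat, x_length, x, rhs_length, ap);
   ML_Operator_Get_Diag(Amat, x_length, &diag);

   for (i = 0; i < x_length; i++) x[i] += omega*(rhs[i] - ap[i])/diag[i];
   return 0;
}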