void ParGridFunction::ExchangeFaceNbrData()
{
   pfes->ExchangeFaceNbrData();

   if (pfes->GetFaceNbrVSize() <= 0) { return; }

   ParMesh *pmesh = pfes->GetParMesh();

   face_nbr_data.SetSize(pfes->GetFaceNbrVSize());
   Vector send_data(pfes->send_face_nbr_ldof.Size_of_connections());

   int *send_offset = pfes->send_face_nbr_ldof.GetI();
   int *send_ldof = pfes->send_face_nbr_ldof.GetJ();
   int *recv_offset = pfes->face_nbr_ldof.GetI();
   MPI_Comm MyComm = pfes->GetComm();

   int num_face_nbrs = pmesh->GetNFaceNeighbors();
   MPI_Request *requests = new MPI_Request[2*num_face_nbrs];
   MPI_Request *send_requests = requests;
   MPI_Request *recv_requests = requests + num_face_nbrs;
   MPI_Status *statuses = new MPI_Status[num_face_nbrs];

   for (int i = 0; i < send_data.Size(); i++)
   {
      send_data[i] = data[send_ldof[i]];
   }

   for (int fn = 0; fn < num_face_nbrs; fn++)
   {
      int nbr_rank = pmesh->GetFaceNbrRank(fn);
      int tag = 0;

      MPI_Isend(&send_data(send_offset[fn]),
                send_offset[fn+1] - send_offset[fn],
                MPI_DOUBLE, nbr_rank, tag, MyComm, &send_requests[fn]);

      MPI_Irecv(&face_nbr_data(recv_offset[fn]),
                recv_offset[fn+1] - recv_offset[fn],
                MPI_DOUBLE, nbr_rank, tag, MyComm, &recv_requests[fn]);
   }

   MPI_Waitall(num_face_nbrs, send_requests, statuses);
   MPI_Waitall(num_face_nbrs, recv_requests, statuses);

   delete [] statuses;
   delete [] requests;
}
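// A minimal standalone sketch (not part of MFEM) of the nonblocking exchange
// pattern used in ExchangeFaceNbrData() above: post all MPI_Isend/MPI_Irecv
// pairs, with message extents taken from CSR-style offset arrays, and only
// then wait on everything. For illustration, every rank exchanges one double
// with every other rank; the neighbor list and offsets are assumptions made
// to keep the example self-contained.
#include <mpi.h>
#include <vector>

int main(int argc, char *argv[])
{
   MPI_Init(&argc, &argv);
   int np, rank;
   MPI_Comm_size(MPI_COMM_WORLD, &np);
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);

   // CSR-style offsets: the data for neighbor 'fn' lives in
   // [offset[fn], offset[fn+1]) -- here a single double per neighbor.
   const int num_nbrs = np - 1;
   std::vector<int> offset(num_nbrs + 1);
   for (int fn = 0; fn <= num_nbrs; fn++) { offset[fn] = fn; }

   std::vector<double> send_data(num_nbrs, (double) rank);
   std::vector<double> recv_data(num_nbrs);
   std::vector<MPI_Request> requests(2 * num_nbrs);

   for (int fn = 0; fn < num_nbrs; fn++)
   {
      int nbr_rank = (fn < rank) ? fn : fn + 1; // every other rank

      MPI_Isend(&send_data[offset[fn]], offset[fn+1] - offset[fn],
                MPI_DOUBLE, nbr_rank, 0, MPI_COMM_WORLD, &requests[fn]);

      MPI_Irecv(&recv_data[offset[fn]], offset[fn+1] - offset[fn],
                MPI_DOUBLE, nbr_rank, 0, MPI_COMM_WORLD,
                &requests[num_nbrs + fn]);
   }
   MPI_Waitall(num_nbrs, requests.data(), MPI_STATUSES_IGNORE);
   MPI_Waitall(num_nbrs, requests.data() + num_nbrs, MPI_STATUSES_IGNORE);

   MPI_Finalize();
   return 0;
}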
void ParNonlinearForm::Mult(const Vector &x, Vector &y) const
{
   NonlinearForm::Mult(x, y); // x --(P)--> aux1 --(A_local)--> aux2

   Y.SetData(aux2.GetData()); // aux2 contains A_local.P.x

   if (fnfi.Size())
   {
      // Terms over shared interior faces in parallel.
      ParFiniteElementSpace *pfes = ParFESpace();
      ParMesh *pmesh = pfes->GetParMesh();
      FaceElementTransformations *tr;
      const FiniteElement *fe1, *fe2;
      Array<int> vdofs1, vdofs2;
      Vector el_x, el_y;

      X.SetData(aux1.GetData()); // aux1 contains P.x
      X.ExchangeFaceNbrData();
      const int n_shared_faces = pmesh->GetNSharedFaces();
      for (int i = 0; i < n_shared_faces; i++)
      {
         tr = pmesh->GetSharedFaceTransformations(i, true);

         fe1 = pfes->GetFE(tr->Elem1No);
         fe2 = pfes->GetFaceNbrFE(tr->Elem2No);

         pfes->GetElementVDofs(tr->Elem1No, vdofs1);
         pfes->GetFaceNbrElementVDofs(tr->Elem2No, vdofs2);

         el_x.SetSize(vdofs1.Size() + vdofs2.Size());
         X.GetSubVector(vdofs1, el_x.GetData());
         X.FaceNbrData().GetSubVector(vdofs2, el_x.GetData() + vdofs1.Size());

         for (int k = 0; k < fnfi.Size(); k++)
         {
            fnfi[k]->AssembleFaceVector(*fe1, *fe2, *tr, el_x, el_y);
            Y.AddElementVector(vdofs1, el_y.GetData());
         }
      }
   }

   P->MultTranspose(Y, y);

   for (int i = 0; i < ess_tdof_list.Size(); i++)
   {
      y(ess_tdof_list[i]) = 0.0;
   }
}
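// A toy serial illustration (assumed sizes and operator, not MFEM internals)
// of the y = P^T F(P x) pattern realized by Mult() above: the prolongation P
// copies each true dof into one or more local dofs, the nonlinear operator F
// acts purely locally, and P^T accumulates the contributions of shared dofs;
// essential true dofs are zeroed at the end, as in the final loop above.
#include <cstdio>

int main()
{
   // 3 true dofs, 4 local dofs; local dofs 1 and 2 both copy true dof 1
   // (a "shared" dof, as on an MPI interface).
   const int nl = 4, nt = 3;
   const int P[nl] = { 0, 1, 1, 2 }; // local dof i copies true dof P[i]

   double x[nt] = { 1.0, 2.0, 3.0 }, X[nl], Y[nl], y[nt] = { 0.0, 0.0, 0.0 };

   for (int i = 0; i < nl; i++) { X[i] = x[P[i]]; }     // X = P x
   for (int i = 0; i < nl; i++) { Y[i] = X[i] * X[i]; } // Y = F(X), local op
   for (int i = 0; i < nl; i++) { y[P[i]] += Y[i]; }    // y = P^T Y

   y[0] = 0.0; // zero an "essential" true dof, as in Mult() above

   printf("y = (%g, %g, %g)\n", y[0], y[1], y[2]); // -> (0, 8, 9):
   // the shared true dof 1 received two local contributions (4 + 4).
   return 0;
}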
void VisualizeMesh(socketstream &sock, const char *vishost, int visport,
                   ParMesh &pmesh, const char *title,
                   int x, int y, int w, int h, const char *keys, bool vec)
{
   MPI_Comm comm = pmesh.GetComm();

   int num_procs, myid;
   MPI_Comm_size(comm, &num_procs);
   MPI_Comm_rank(comm, &myid);

   bool newly_opened = false;
   int connection_failed;

   do
   {
      if (myid == 0)
      {
         if (!sock.is_open() || !sock)
         {
            sock.open(vishost, visport);
            sock.precision(8);
            newly_opened = true;
         }
         // Only the mesh is streamed below, so use the "mesh" keyword
         // ("solution" would make GLVis wait for grid function data).
         sock << "mesh\n";
      }

      pmesh.PrintAsOne(sock);

      if (myid == 0 && newly_opened)
      {
         sock << "window_title '" << title << "'\n"
              << "window_geometry "
              << x << " " << y << " " << w << " " << h << "\n";
         if ( keys ) { sock << "keys " << keys << "\n"; }
         else { sock << "keys maaAc"; }
         if ( vec ) { sock << "vvv"; }
         sock << endl;
      }

      if (myid == 0)
      {
         connection_failed = !sock && !newly_opened;
      }
      MPI_Bcast(&connection_failed, 1, MPI_INT, 0, comm);
   }
   while (connection_failed);
}
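// A hypothetical call site for the helper above (the wrapper name and the
// window geometry are made up for illustration; assumes a GLVis server
// listening on localhost:19916):
void ShowParMesh(ParMesh &pmesh)
{
   socketstream mesh_sock; // opened on rank 0 inside VisualizeMesh()
   VisualizeMesh(mesh_sock, "localhost", 19916, pmesh, "Parallel mesh",
                 0, 0, 400, 400, NULL, false);
}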
int main(int argc, char *argv[]) { // 1. Initialize MPI. int num_procs, myid; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &num_procs); MPI_Comm_rank(MPI_COMM_WORLD, &myid); // 2. Parse command-line options. const char *mesh_file = "../../data/star.mesh"; int order = 1; bool set_bc = true; bool static_cond = false; bool hybridization = false; bool visualization = 1; bool use_petsc = true; const char *petscrc_file = ""; bool use_nonoverlapping = false; OptionsParser args(argc, argv); args.AddOption(&mesh_file, "-m", "--mesh", "Mesh file to use."); args.AddOption(&order, "-o", "--order", "Finite element order (polynomial degree)."); args.AddOption(&set_bc, "-bc", "--impose-bc", "-no-bc", "--dont-impose-bc", "Impose or not essential boundary conditions."); args.AddOption(&freq, "-f", "--frequency", "Set the frequency for the exact" " solution."); args.AddOption(&static_cond, "-sc", "--static-condensation", "-no-sc", "--no-static-condensation", "Enable static condensation."); args.AddOption(&hybridization, "-hb", "--hybridization", "-no-hb", "--no-hybridization", "Enable hybridization."); args.AddOption(&visualization, "-vis", "--visualization", "-no-vis", "--no-visualization", "Enable or disable GLVis visualization."); args.AddOption(&use_petsc, "-usepetsc", "--usepetsc", "-no-petsc", "--no-petsc", "Use or not PETSc to solve the linear system."); args.AddOption(&petscrc_file, "-petscopts", "--petscopts", "PetscOptions file to use."); args.AddOption(&use_nonoverlapping, "-nonoverlapping", "--nonoverlapping", "-no-nonoverlapping", "--no-nonoverlapping", "Use or not the block diagonal PETSc's matrix format " "for non-overlapping domain decomposition."); args.Parse(); if (!args.Good()) { if (myid == 0) { args.PrintUsage(cout); } MPI_Finalize(); return 1; } if (myid == 0) { args.PrintOptions(cout); } // 2b. We initialize PETSc if (use_petsc) { MFEMInitializePetsc(NULL,NULL,petscrc_file,NULL); } kappa = freq * M_PI; // 3. Read the (serial) mesh from the given mesh file on all processors. We // can handle triangular, quadrilateral, tetrahedral, hexahedral, surface // and volume, as well as periodic meshes with the same code. Mesh *mesh = new Mesh(mesh_file, 1, 1); int dim = mesh->Dimension(); int sdim = mesh->SpaceDimension(); // 4. Refine the serial mesh on all processors to increase the resolution. In // this example we do 'ref_levels' of uniform refinement. We choose // 'ref_levels' to be the largest number that gives a final mesh with no // more than 1,000 elements. { int ref_levels = (int)floor(log(1000./mesh->GetNE())/log(2.)/dim); for (int l = 0; l < ref_levels; l++) { mesh->UniformRefinement(); } } // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine // this mesh further in parallel to increase the resolution. Once the // parallel mesh is defined, the serial mesh can be deleted. Tetrahedral // meshes need to be reoriented before we can define high-order Nedelec // spaces on them (this is needed in the ADS solver below). ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh); delete mesh; { int par_ref_levels = 2; for (int l = 0; l < par_ref_levels; l++) { pmesh->UniformRefinement(); } } pmesh->ReorientTetMesh(); // 6. Define a parallel finite element space on the parallel mesh. Here we // use the Raviart-Thomas finite elements of the specified order. 
FiniteElementCollection *fec = new RT_FECollection(order-1, dim); ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec); HYPRE_Int size = fespace->GlobalTrueVSize(); if (myid == 0) { cout << "Number of finite element unknowns: " << size << endl; } // 7. Determine the list of true (i.e. parallel conforming) essential // boundary dofs. In this example, the boundary conditions are defined // by marking all the boundary attributes from the mesh as essential // (Dirichlet) and converting them to a list of true dofs. Array<int> ess_tdof_list; if (pmesh->bdr_attributes.Size()) { Array<int> ess_bdr(pmesh->bdr_attributes.Max()); ess_bdr = set_bc ? 1 : 0; fespace->GetEssentialTrueDofs(ess_bdr, ess_tdof_list); } // 8. Set up the parallel linear form b(.) which corresponds to the // right-hand side of the FEM linear system, which in this case is // (f,phi_i) where f is given by the function f_exact and phi_i are the // basis functions in the finite element fespace. VectorFunctionCoefficient f(sdim, f_exact); ParLinearForm *b = new ParLinearForm(fespace); b->AddDomainIntegrator(new VectorFEDomainLFIntegrator(f)); b->Assemble(); // 9. Define the solution vector x as a parallel finite element grid function // corresponding to fespace. Initialize x by projecting the exact // solution. Note that only values from the boundary faces will be used // when eliminating the non-homogeneous boundary condition to modify the // r.h.s. vector b. ParGridFunction x(fespace); VectorFunctionCoefficient F(sdim, F_exact); x.ProjectCoefficient(F); // 10. Set up the parallel bilinear form corresponding to the H(div) // diffusion operator grad alpha div + beta I, by adding the div-div and // the mass domain integrators. Coefficient *alpha = new ConstantCoefficient(1.0); Coefficient *beta = new ConstantCoefficient(1.0); ParBilinearForm *a = new ParBilinearForm(fespace); a->AddDomainIntegrator(new DivDivIntegrator(*alpha)); a->AddDomainIntegrator(new VectorFEMassIntegrator(*beta)); // 11. Assemble the parallel bilinear form and the corresponding linear // system, applying any necessary transformations such as: parallel // assembly, eliminating boundary conditions, applying conforming // constraints for non-conforming AMR, static condensation, // hybridization, etc. FiniteElementCollection *hfec = NULL; ParFiniteElementSpace *hfes = NULL; if (static_cond) { a->EnableStaticCondensation(); } else if (hybridization) { hfec = new DG_Interface_FECollection(order-1, dim); hfes = new ParFiniteElementSpace(pmesh, hfec); a->EnableHybridization(hfes, new NormalTraceJumpIntegrator(), ess_tdof_list); } a->Assemble(); Vector B, X; CGSolver *pcg = new CGSolver(MPI_COMM_WORLD); pcg->SetRelTol(1e-12); pcg->SetMaxIter(500); pcg->SetPrintLevel(1); if (!use_petsc) { HypreParMatrix A; a->FormLinearSystem(ess_tdof_list, x, *b, A, X, B); HYPRE_Int glob_size = A.GetGlobalNumRows(); if (myid == 0) { cout << "Size of linear system: " << glob_size << endl; } // 12. Define and apply a parallel PCG solver for A X = B with the 2D AMS or // the 3D ADS preconditioners from hypre. If using hybridization, the // system is preconditioned with hypre's BoomerAMG. HypreSolver *prec = NULL; pcg->SetOperator(A); if (hybridization) { prec = new HypreBoomerAMG(A); } else { ParFiniteElementSpace *prec_fespace = (a->StaticCondensationIsEnabled() ? 
a->SCParFESpace() : fespace); if (dim == 2) { prec = new HypreAMS(A, prec_fespace); } else { prec = new HypreADS(A, prec_fespace); } } pcg->SetPreconditioner(*prec); pcg->Mult(B, X); delete prec; } else { PetscParMatrix A; PetscPreconditioner *prec = NULL; a->SetOperatorType(use_nonoverlapping ? Operator::PETSC_MATIS : Operator::PETSC_MATAIJ); a->FormLinearSystem(ess_tdof_list, x, *b, A, X, B); if (myid == 0) { cout << "Size of linear system: " << A.M() << endl; } pcg->SetOperator(A); if (use_nonoverlapping) { ParFiniteElementSpace *prec_fespace = (a->StaticCondensationIsEnabled() ? a->SCParFESpace() : fespace); // Auxiliary class for BDDC customization PetscBDDCSolverParams opts; // Inform the solver about the finite element space opts.SetSpace(prec_fespace); // Inform the solver about essential dofs opts.SetEssBdrDofs(&ess_tdof_list); // Create a BDDC solver with parameters prec = new PetscBDDCSolver(A, opts); } else { // Create an empty preconditioner that can be customized at runtime. prec = new PetscPreconditioner(A, "solver_"); } pcg->SetPreconditioner(*prec); pcg->Mult(B, X); delete prec; } delete pcg; // 13. Recover the parallel grid function corresponding to X. This is the // local finite element solution on each processor. a->RecoverFEMSolution(X, *b, x); // 14. Compute and print the L^2 norm of the error. { double err = x.ComputeL2Error(F); if (myid == 0) { cout << "\n|| F_h - F ||_{L^2} = " << err << '\n' << endl; } } // 15. Save the refined mesh and the solution in parallel. This output can // be viewed later using GLVis: "glvis -np <np> -m mesh -g sol". { ostringstream mesh_name, sol_name; mesh_name << "mesh." << setfill('0') << setw(6) << myid; sol_name << "sol." << setfill('0') << setw(6) << myid; ofstream mesh_ofs(mesh_name.str().c_str()); mesh_ofs.precision(8); pmesh->Print(mesh_ofs); ofstream sol_ofs(sol_name.str().c_str()); sol_ofs.precision(8); x.Save(sol_ofs); } // 16. Send the solution by socket to a GLVis server. if (visualization) { char vishost[] = "localhost"; int visport = 19916; socketstream sol_sock(vishost, visport); sol_sock << "parallel " << num_procs << " " << myid << "\n"; sol_sock.precision(8); sol_sock << "solution\n" << *pmesh << x << flush; } // 17. Free the used memory. delete hfes; delete hfec; delete a; delete alpha; delete beta; delete b; delete fespace; delete fec; delete pmesh; // We finalize PETSc if (use_petsc) { MFEMFinalizePetsc(); } MPI_Finalize(); return 0; }
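// F_exact and f_exact are referenced above but defined outside this excerpt
// (as file-scope functions, like the globals 'freq' and 'kappa'). A sketch of
// a form consistent with the operator -grad(alpha div) + beta I (alpha = beta
// = 1): for F = (cos(kx)sin(ky), cos(ky)sin(kx)), div F = -2k sin(kx)sin(ky),
// so -grad(div F) + F = (1 + 2k^2) F, i.e. f = (1 + 2k^2) F. Assumed form;
// 'kappa' is the file-scope variable set in main (kappa = freq * M_PI).
void F_exact(const Vector &x, Vector &F)
{
   F(0) = cos(kappa * x(0)) * sin(kappa * x(1));
   F(1) = cos(kappa * x(1)) * sin(kappa * x(0));
   if (x.Size() == 3) { F(2) = 0.0; }
}

void f_exact(const Vector &x, Vector &f)
{
   const double temp = 1.0 + 2.0 * kappa * kappa;
   f(0) = temp * cos(kappa * x(0)) * sin(kappa * x(1));
   f(1) = temp * cos(kappa * x(1)) * sin(kappa * x(0));
   if (x.Size() == 3) { f(2) = 0.0; }
}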
int main(int argc, char *argv[]) { // 1. Initialize MPI. int num_procs, myid; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &num_procs); MPI_Comm_rank(MPI_COMM_WORLD, &myid); // 2. Parse command-line options. const char *mesh_file = "../data/beam-tet.mesh"; int order = 1; bool visualization = 1; OptionsParser args(argc, argv); args.AddOption(&mesh_file, "-m", "--mesh", "Mesh file to use."); args.AddOption(&order, "-o", "--order", "Finite element order (polynomial degree)."); args.AddOption(&visualization, "-vis", "--visualization", "-no-vis", "--no-visualization", "Enable or disable GLVis visualization."); args.Parse(); if (!args.Good()) { if (myid == 0) args.PrintUsage(cout); MPI_Finalize(); return 1; } if (myid == 0) args.PrintOptions(cout); // 3. Read the (serial) mesh from the given mesh file on all processors. We // can handle triangular, quadrilateral, tetrahedral, hexahedral, surface // and volume meshes with the same code. Mesh *mesh; ifstream imesh(mesh_file); if (!imesh) { if (myid == 0) cerr << "\nCan not open mesh file: " << mesh_file << '\n' << endl; MPI_Finalize(); return 2; } mesh = new Mesh(imesh, 1, 1); imesh.close(); int dim = mesh->Dimension(); if (dim != 3) { if (myid == 0) cerr << "\nThis example requires a 3D mesh\n" << endl; MPI_Finalize(); return 3; } // 4. Refine the serial mesh on all processors to increase the resolution. In // this example we do 'ref_levels' of uniform refinement. We choose // 'ref_levels' to be the largest number that gives a final mesh with no // more than 1,000 elements. { int ref_levels = (int)floor(log(1000./mesh->GetNE())/log(2.)/dim); for (int l = 0; l < ref_levels; l++) mesh->UniformRefinement(); } // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine // this mesh further in parallel to increase the resolution. Once the // parallel mesh is defined, the serial mesh can be deleted. Tetrahedral // meshes need to be reoriented before we can define high-order Nedelec // spaces on them. ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh); delete mesh; { int par_ref_levels = 2; for (int l = 0; l < par_ref_levels; l++) pmesh->UniformRefinement(); } pmesh->ReorientTetMesh(); // 6. Define a parallel finite element space on the parallel mesh. Here we // use the lowest order Nedelec finite elements, but we can easily switch // to higher-order spaces by changing the value of p. FiniteElementCollection *fec = new ND_FECollection(order, dim); ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec); int size = fespace->GlobalTrueVSize(); if (myid == 0) cout << "Number of unknowns: " << size << endl; // 7. Set up the parallel linear form b(.) which corresponds to the // right-hand side of the FEM linear system, which in this case is // (f,phi_i) where f is given by the function f_exact and phi_i are the // basis functions in the finite element fespace. VectorFunctionCoefficient f(3, f_exact); ParLinearForm *b = new ParLinearForm(fespace); b->AddDomainIntegrator(new VectorFEDomainLFIntegrator(f)); b->Assemble(); // 8. Define the solution vector x as a parallel finite element grid function // corresponding to fespace. Initialize x by projecting the exact // solution. Note that only values from the boundary edges will be used // when eliminating the non-homogeneous boundary condition to modify the // r.h.s. vector b. ParGridFunction x(fespace); VectorFunctionCoefficient E(3, E_exact); x.ProjectCoefficient(E); // 9. 
Set up the parallel bilinear form corresponding to the EM diffusion // operator curl muinv curl + sigma I, by adding the curl-curl and the // mass domain integrators and finally imposing non-homogeneous Dirichlet // boundary conditions. The boundary conditions are implemented by // marking all the boundary attributes from the mesh as essential // (Dirichlet). After serial and parallel assembly we extract the // parallel matrix A. Coefficient *muinv = new ConstantCoefficient(1.0); Coefficient *sigma = new ConstantCoefficient(1.0); ParBilinearForm *a = new ParBilinearForm(fespace); a->AddDomainIntegrator(new CurlCurlIntegrator(*muinv)); a->AddDomainIntegrator(new VectorFEMassIntegrator(*sigma)); a->Assemble(); Array<int> ess_bdr(pmesh->bdr_attributes.Max()); ess_bdr = 1; a->EliminateEssentialBC(ess_bdr, x, *b); a->Finalize(); // 10. Define the parallel (hypre) matrix and vectors representing a(.,.), // b(.) and the finite element approximation. HypreParMatrix *A = a->ParallelAssemble(); HypreParVector *B = b->ParallelAssemble(); HypreParVector *X = x.ParallelAverage(); *X = 0.0; delete a; delete sigma; delete muinv; delete b; // 11. Define and apply a parallel PCG solver for AX=B with the AMS // preconditioner from hypre. HypreSolver *ams = new HypreAMS(*A, fespace); HyprePCG *pcg = new HyprePCG(*A); pcg->SetTol(1e-12); pcg->SetMaxIter(500); pcg->SetPrintLevel(2); pcg->SetPreconditioner(*ams); pcg->Mult(*B, *X); // 12. Extract the parallel grid function corresponding to the finite element // approximation X. This is the local solution on each processor. x = *X; // 13. Compute and print the L^2 norm of the error. { double err = x.ComputeL2Error(E); if (myid == 0) cout << "\n|| E_h - E ||_{L^2} = " << err << '\n' << endl; } // 14. Save the refined mesh and the solution in parallel. This output can // be viewed later using GLVis: "glvis -np <np> -m mesh -g sol". { ostringstream mesh_name, sol_name; mesh_name << "mesh." << setfill('0') << setw(6) << myid; sol_name << "sol." << setfill('0') << setw(6) << myid; ofstream mesh_ofs(mesh_name.str().c_str()); mesh_ofs.precision(8); pmesh->Print(mesh_ofs); ofstream sol_ofs(sol_name.str().c_str()); sol_ofs.precision(8); x.Save(sol_ofs); } // 15. Send the solution by socket to a GLVis server. if (visualization) { char vishost[] = "localhost"; int visport = 19916; socketstream sol_sock(vishost, visport); sol_sock << "parallel " << num_procs << " " << myid << "\n"; sol_sock.precision(8); sol_sock << "solution\n" << *pmesh << x << flush; } // 16. Free the used memory. delete pcg; delete ams; delete X; delete B; delete A; delete fespace; delete fec; delete pmesh; MPI_Finalize(); return 0; }
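// E_exact and f_exact are defined outside this excerpt. A sketch of a form
// consistent with the operator curl muinv curl + sigma I (muinv = sigma = 1),
// assuming the constant kappa = M_PI: for E = (sin(k y), sin(k z), sin(k x))
// one has curl curl E = k^2 E, hence f = (1 + k^2) E.
const double kappa = M_PI; // assumed constant

void E_exact(const Vector &x, Vector &E)
{
   E(0) = sin(kappa * x(1));
   E(1) = sin(kappa * x(2));
   E(2) = sin(kappa * x(0));
}

void f_exact(const Vector &x, Vector &f)
{
   f(0) = (1. + kappa * kappa) * sin(kappa * x(1));
   f(1) = (1. + kappa * kappa) * sin(kappa * x(2));
   f(2) = (1. + kappa * kappa) * sin(kappa * x(0));
}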
int main(int argc, char *argv[]) { // 1. Initialize MPI. int num_procs, myid; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &num_procs); MPI_Comm_rank(MPI_COMM_WORLD, &myid); // 2. Parse command-line options. int elem_type = 1; int ref_levels = 2; int amr = 0; int order = 2; bool always_snap = false; bool visualization = 1; OptionsParser args(argc, argv); args.AddOption(&elem_type, "-e", "--elem", "Type of elements to use: 0 - triangles, 1 - quads."); args.AddOption(&order, "-o", "--order", "Finite element order (polynomial degree)."); args.AddOption(&ref_levels, "-r", "--refine", "Number of times to refine the mesh uniformly."); args.AddOption(&amr, "-amr", "--refine-locally", "Additional local (non-conforming) refinement:" " 1 = refine around north pole, 2 = refine randomly."); args.AddOption(&visualization, "-vis", "--visualization", "-no-vis", "--no-visualization", "Enable or disable GLVis visualization."); args.AddOption(&always_snap, "-snap", "--always-snap", "-no-snap", "--snap-at-the-end", "If true, snap nodes to the sphere initially and after each refinement " "otherwise, snap only after the last refinement"); args.Parse(); if (!args.Good()) { if (myid == 0) { args.PrintUsage(cout); } MPI_Finalize(); return 1; } if (myid == 0) { args.PrintOptions(cout); } // 3. Generate an initial high-order (surface) mesh on the unit sphere. The // Mesh object represents a 2D mesh in 3 spatial dimensions. We first add // the elements and the vertices of the mesh, and then make it high-order // by specifying a finite element space for its nodes. int Nvert = 8, Nelem = 6; if (elem_type == 0) { Nvert = 6; Nelem = 8; } Mesh *mesh = new Mesh(2, Nvert, Nelem, 0, 3); if (elem_type == 0) // inscribed octahedron { const double tri_v[6][3] = { { 1, 0, 0}, { 0, 1, 0}, {-1, 0, 0}, { 0, -1, 0}, { 0, 0, 1}, { 0, 0, -1} }; const int tri_e[8][3] = { {0, 1, 4}, {1, 2, 4}, {2, 3, 4}, {3, 0, 4}, {1, 0, 5}, {2, 1, 5}, {3, 2, 5}, {0, 3, 5} }; for (int j = 0; j < Nvert; j++) { mesh->AddVertex(tri_v[j]); } for (int j = 0; j < Nelem; j++) { int attribute = j + 1; mesh->AddTriangle(tri_e[j], attribute); } mesh->FinalizeTriMesh(1, 1, true); } else // inscribed cube { const double quad_v[8][3] = { {-1, -1, -1}, {+1, -1, -1}, {+1, +1, -1}, {-1, +1, -1}, {-1, -1, +1}, {+1, -1, +1}, {+1, +1, +1}, {-1, +1, +1} }; const int quad_e[6][4] = { {3, 2, 1, 0}, {0, 1, 5, 4}, {1, 2, 6, 5}, {2, 3, 7, 6}, {3, 0, 4, 7}, {4, 5, 6, 7} }; for (int j = 0; j < Nvert; j++) { mesh->AddVertex(quad_v[j]); } for (int j = 0; j < Nelem; j++) { int attribute = j + 1; mesh->AddQuad(quad_e[j], attribute); } mesh->FinalizeQuadMesh(1, 1, true); } // Set the space for the high-order mesh nodes. H1_FECollection fec(order, mesh->Dimension()); FiniteElementSpace nodal_fes(mesh, &fec, mesh->SpaceDimension()); mesh->SetNodalFESpace(&nodal_fes); // 4. Refine the mesh while snapping nodes to the sphere. Number of parallel // refinements is fixed to 2. for (int l = 0; l <= ref_levels; l++) { if (l > 0) // for l == 0 just perform snapping { mesh->UniformRefinement(); } // Snap the nodes of the refined mesh back to sphere surface. 
if (always_snap) { SnapNodes(*mesh); } } if (amr == 1) { for (int l = 0; l < 3; l++) { mesh->RefineAtVertex(Vertex(0, 0, 1)); } SnapNodes(*mesh); } else if (amr == 2) { for (int l = 0; l < 2; l++) { mesh->RandomRefinement(0.5); // 50% probability } SnapNodes(*mesh); } ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh); delete mesh; { int par_ref_levels = 2; for (int l = 0; l < par_ref_levels; l++) { pmesh->UniformRefinement(); // Snap the nodes of the refined mesh back to sphere surface. if (always_snap) { SnapNodes(*pmesh); } } if (!always_snap || par_ref_levels < 1) { SnapNodes(*pmesh); } } if (amr == 1) { for (int l = 0; l < 2; l++) { pmesh->RefineAtVertex(Vertex(0, 0, 1)); } SnapNodes(*pmesh); } else if (amr == 2) { for (int l = 0; l < 2; l++) { pmesh->RandomRefinement(0.5); // 50% probability } SnapNodes(*pmesh); } // 5. Define a finite element space on the mesh. Here we use isoparametric // finite elements -- the same as the mesh nodes. ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, &fec); HYPRE_Int size = fespace->GlobalTrueVSize(); if (myid == 0) { cout << "Number of unknowns: " << size << endl; } // 6. Set up the linear form b(.) which corresponds to the right-hand side of // the FEM linear system, which in this case is (1,phi_i) where phi_i are // the basis functions in the finite element fespace. ParLinearForm *b = new ParLinearForm(fespace); ConstantCoefficient one(1.0); FunctionCoefficient rhs_coef (analytic_rhs); FunctionCoefficient sol_coef (analytic_solution); b->AddDomainIntegrator(new DomainLFIntegrator(rhs_coef)); b->Assemble(); // 7. Define the solution vector x as a finite element grid function // corresponding to fespace. Initialize x with initial guess of zero. ParGridFunction x(fespace); x = 0.0; // 8. Set up the bilinear form a(.,.) on the finite element space // corresponding to the Laplacian operator -Delta, by adding the Diffusion // and Mass domain integrators. ParBilinearForm *a = new ParBilinearForm(fespace); a->AddDomainIntegrator(new DiffusionIntegrator(one)); a->AddDomainIntegrator(new MassIntegrator(one)); // 9. Assemble the parallel linear system, applying any transformations // such as: parallel assembly, applying conforming constraints, etc. a->Assemble(); HypreParMatrix A; Vector B, X; Array<int> empty_tdof_list; a->FormLinearSystem(empty_tdof_list, x, *b, A, X, B); // 10. Define and apply a parallel PCG solver for AX=B with the BoomerAMG // preconditioner from hypre. Extract the parallel grid function x // corresponding to the finite element approximation X. This is the local // solution on each processor. HypreSolver *amg = new HypreBoomerAMG(A); HyprePCG *pcg = new HyprePCG(A); pcg->SetTol(1e-12); pcg->SetMaxIter(200); pcg->SetPrintLevel(2); pcg->SetPreconditioner(*amg); pcg->Mult(B, X); a->RecoverFEMSolution(X, *b, x); delete a; delete b; // 11. Compute and print the L^2 norm of the error. double err = x.ComputeL2Error(sol_coef); if (myid == 0) { cout << "\nL2 norm of error: " << err << endl; } // 12. Save the refined mesh and the solution. This output can be viewed // later using GLVis: "glvis -np <np> -m sphere_refined -g sol". { ostringstream mesh_name, sol_name; mesh_name << "sphere_refined." << setfill('0') << setw(6) << myid; sol_name << "sol." << setfill('0') << setw(6) << myid; ofstream mesh_ofs(mesh_name.str().c_str()); mesh_ofs.precision(8); pmesh->Print(mesh_ofs); ofstream sol_ofs(sol_name.str().c_str()); sol_ofs.precision(8); x.Save(sol_ofs); } // 13. Send the solution by socket to a GLVis server. 
if (visualization) { char vishost[] = "localhost"; int visport = 19916; socketstream sol_sock(vishost, visport); sol_sock << "parallel " << num_procs << " " << myid << "\n"; sol_sock.precision(8); sol_sock << "solution\n" << *pmesh << x << flush; } // 14. Free the used memory. delete pcg; delete amg; delete fespace; delete pmesh; MPI_Finalize(); return 0; }
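// analytic_solution, analytic_rhs and SnapNodes are defined outside this
// excerpt. A sketch under the assumption u = x*y on the unit sphere: u is a
// degree-2 spherical harmonic, so the Laplace-Beltrami operator gives
// -Delta u = 6 u and hence (-Delta + I) u = 7 u, which fixes the right-hand
// side. The division by |x|^2 makes the formulas exact on the slightly
// off-sphere nodes of the discrete mesh.
double analytic_solution(const Vector &x)
{
   double l2 = x(0)*x(0) + x(1)*x(1) + x(2)*x(2);
   return x(0) * x(1) / l2;
}

double analytic_rhs(const Vector &x)
{
   double l2 = x(0)*x(0) + x(1)*x(1) + x(2)*x(2);
   return 7.0 * x(0) * x(1) / l2;
}

// Snap the mesh nodes to the unit sphere by normalizing each node position.
void SnapNodes(Mesh &mesh)
{
   GridFunction &nodes = *mesh.GetNodes();
   Vector node(mesh.SpaceDimension());
   for (int i = 0; i < nodes.FESpace()->GetNDofs(); i++)
   {
      for (int d = 0; d < mesh.SpaceDimension(); d++)
      {
         node(d) = nodes(nodes.FESpace()->DofToVDof(i, d));
      }
      node /= node.Norml2();
      for (int d = 0; d < mesh.SpaceDimension(); d++)
      {
         nodes(nodes.FESpace()->DofToVDof(i, d)) = node(d);
      }
   }
}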
int main(int argc, char *argv[]) { // 1. Initialize MPI. int num_procs, myid; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &num_procs); MPI_Comm_rank(MPI_COMM_WORLD, &myid); // 2. Parse command-line options. const char *mesh_file = "../data/beam-quad.mesh"; int ser_ref_levels = 2; int par_ref_levels = 0; int order = 2; int ode_solver_type = 3; double t_final = 300.0; double dt = 3; double visc = 1e-2; bool visualization = true; int vis_steps = 1; OptionsParser args(argc, argv); args.AddOption(&mesh_file, "-m", "--mesh", "Mesh file to use."); args.AddOption(&ser_ref_levels, "-rs", "--refine-serial", "Number of times to refine the mesh uniformly in serial."); args.AddOption(&par_ref_levels, "-rp", "--refine-parallel", "Number of times to refine the mesh uniformly in parallel."); args.AddOption(&order, "-o", "--order", "Order (degree) of the finite elements."); args.AddOption(&ode_solver_type, "-s", "--ode-solver", "ODE solver: 1 - Backward Euler, 2 - SDIRK2, 3 - SDIRK3,\n\t" "\t 11 - Forward Euler, 12 - RK2, 13 - RK3 SSP, 14 - RK4."); args.AddOption(&t_final, "-tf", "--t-final", "Final time; start time is 0."); args.AddOption(&dt, "-dt", "--time-step", "Time step."); args.AddOption(&visc, "-v", "--viscosity", "Viscosity coefficient."); args.AddOption(&visualization, "-vis", "--visualization", "-no-vis", "--no-visualization", "Enable or disable GLVis visualization."); args.AddOption(&vis_steps, "-vs", "--visualization-steps", "Visualize every n-th timestep."); args.Parse(); if (!args.Good()) { if (myid == 0) args.PrintUsage(cout); MPI_Finalize(); return 1; } if (myid == 0) args.PrintOptions(cout); // 3. Read the serial mesh from the given mesh file on all processors. We can // handle triangular, quadrilateral, tetrahedral and hexahedral meshes // with the same code. Mesh *mesh; ifstream imesh(mesh_file); if (!imesh) { if (myid == 0) cerr << "\nCan not open mesh file: " << mesh_file << '\n' << endl; MPI_Finalize(); return 2; } mesh = new Mesh(imesh, 1, 1); imesh.close(); int dim = mesh->Dimension(); // 4. Define the ODE solver used for time integration. Several implicit // singly diagonal implicit Runge-Kutta (SDIRK) methods, as well as // explicit Runge-Kutta methods are available. ODESolver *ode_solver; switch (ode_solver_type) { // Implicit L-stable methods case 1: ode_solver = new BackwardEulerSolver; break; case 2: ode_solver = new SDIRK23Solver(2); break; case 3: ode_solver = new SDIRK33Solver; break; // Explicit methods case 11: ode_solver = new ForwardEulerSolver; break; case 12: ode_solver = new RK2Solver(0.5); break; // midpoint method case 13: ode_solver = new RK3SSPSolver; break; case 14: ode_solver = new RK4Solver; break; // Implicit A-stable methods (not L-stable) case 22: ode_solver = new ImplicitMidpointSolver; break; case 23: ode_solver = new SDIRK23Solver; break; case 24: ode_solver = new SDIRK34Solver; break; default: if (myid == 0) cout << "Unknown ODE solver type: " << ode_solver_type << '\n'; MPI_Finalize(); return 3; } // 5. Refine the mesh in serial to increase the resolution. In this example // we do 'ser_ref_levels' of uniform refinement, where 'ser_ref_levels' is // a command-line parameter. for (int lev = 0; lev < ser_ref_levels; lev++) mesh->UniformRefinement(); // 6. Define a parallel mesh by a partitioning of the serial mesh. Refine // this mesh further in parallel to increase the resolution. Once the // parallel mesh is defined, the serial mesh can be deleted. 
ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh); delete mesh; for (int lev = 0; lev < par_ref_levels; lev++) pmesh->UniformRefinement(); // 7. Define the parallel vector finite element spaces representing the mesh // deformation x_gf, the velocity v_gf, and the initial configuration, // x_ref. Define also the elastic energy density, w_gf, which is in a // discontinuous higher-order space. Since x and v are integrated in time // as a system, we group them together in block vector vx, on the unique // parallel degrees of freedom, with offsets given by array true_offset. H1_FECollection fe_coll(order, dim); ParFiniteElementSpace fespace(pmesh, &fe_coll, dim); int glob_size = fespace.GlobalTrueVSize(); if (myid == 0) cout << "Number of velocity/deformation unknowns: " << glob_size << endl; int true_size = fespace.TrueVSize(); Array<int> true_offset(3); true_offset[0] = 0; true_offset[1] = true_size; true_offset[2] = 2*true_size; BlockVector vx(true_offset); ParGridFunction v_gf(&fespace), x_gf(&fespace); ParGridFunction x_ref(&fespace); pmesh->GetNodes(x_ref); L2_FECollection w_fec(order + 1, dim); ParFiniteElementSpace w_fespace(pmesh, &w_fec); ParGridFunction w_gf(&w_fespace); // 8. Set the initial conditions for v_gf, x_gf and vx, and define the // boundary conditions on a beam-like mesh (see description above). VectorFunctionCoefficient velo(dim, InitialVelocity); v_gf.ProjectCoefficient(velo); VectorFunctionCoefficient deform(dim, InitialDeformation); x_gf.ProjectCoefficient(deform); v_gf.GetTrueDofs(vx.GetBlock(0)); x_gf.GetTrueDofs(vx.GetBlock(1)); Array<int> ess_bdr(fespace.GetMesh()->bdr_attributes.Max()); ess_bdr = 0; ess_bdr[0] = 1; // boundary attribute 1 (index 0) is fixed // 9. Initialize the hyperelastic operator, the GLVis visualization and print // the initial energies. HyperelasticOperator oper(fespace, ess_bdr, visc); socketstream vis_v, vis_w; if (visualization) { char vishost[] = "localhost"; int visport = 19916; vis_v.open(vishost, visport); vis_v.precision(8); visualize(vis_v, pmesh, &x_gf, &v_gf, "Velocity", true); // Make sure all ranks have sent their 'v' solution before initiating // another set of GLVis connections (one from each rank): MPI_Barrier(pmesh->GetComm()); vis_w.open(vishost, visport); if (vis_w) { oper.GetElasticEnergyDensity(x_gf, w_gf); vis_w.precision(8); visualize(vis_w, pmesh, &x_gf, &w_gf, "Elastic energy density", true); } } double ee0 = oper.ElasticEnergy(x_gf); double ke0 = oper.KineticEnergy(v_gf); if (myid == 0) { cout << "initial elastic energy (EE) = " << ee0 << endl; cout << "initial kinetic energy (KE) = " << ke0 << endl; cout << "initial total energy (TE) = " << (ee0 + ke0) << endl; } // 10. Perform time-integration (looping over the time iterations, ti, with a // time-step dt). ode_solver->Init(oper); double t = 0.0; bool last_step = false; for (int ti = 1; !last_step; ti++) { if (t + dt >= t_final - dt/2) last_step = true; ode_solver->Step(vx, t, dt); if (last_step || (ti % vis_steps) == 0) { v_gf.Distribute(vx.GetBlock(0)); x_gf.Distribute(vx.GetBlock(1)); double ee = oper.ElasticEnergy(x_gf); double ke = oper.KineticEnergy(v_gf); if (myid == 0) cout << "step " << ti << ", t = " << t << ", EE = " << ee << ", KE = " << ke << ", ΔTE = " << (ee+ke)-(ee0+ke0) << endl; if (visualization) { visualize(vis_v, pmesh, &x_gf, &v_gf); if (vis_w) { oper.GetElasticEnergyDensity(x_gf, w_gf); visualize(vis_w, pmesh, &x_gf, &w_gf); } } } } // 11. Save the displaced mesh, the velocity and elastic energy. 
   {
      GridFunction *nodes = &x_gf;
      int owns_nodes = 0;
      pmesh->SwapNodes(nodes, owns_nodes);

      ostringstream mesh_name, velo_name, ee_name;
      mesh_name << "deformed." << setfill('0') << setw(6) << myid;
      velo_name << "velocity." << setfill('0') << setw(6) << myid;
      ee_name << "elastic_energy." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);
      pmesh->SwapNodes(nodes, owns_nodes);

      ofstream velo_ofs(velo_name.str().c_str());
      velo_ofs.precision(8);
      v_gf.Save(velo_ofs);

      ofstream ee_ofs(ee_name.str().c_str());
      ee_ofs.precision(8);
      oper.GetElasticEnergyDensity(x_gf, w_gf);
      w_gf.Save(ee_ofs);
   }

   // 12. Free the used memory.
   delete ode_solver;
   delete pmesh;

   MPI_Finalize();

   return 0;
}
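// InitialVelocity and InitialDeformation are defined outside this excerpt.
// A sketch of plausible definitions (assumed, following the comments above:
// start from the reference configuration and give the free end of the beam a
// small transverse kick; the amplitude 's' is an assumption):
void InitialDeformation(const Vector &x, Vector &y)
{
   // Use the reference configuration itself as the initial deformation.
   y = x;
}

void InitialVelocity(const Vector &x, Vector &v)
{
   const int dim = x.Size();
   const double s = 0.1 / 64.0; // assumed amplitude
   v = 0.0;
   v(dim - 1) = s * x(0) * x(0) * (8.0 - x(0));
   v(0) = -s * x(0) * x(0);
}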
int main(int argc, char *argv[]) { // 1. Initialize MPI. MPI_Session mpi(argc, argv); int myid = mpi.WorldRank(); // print the cool banner if (mpi.Root()) { display_banner(cout); } // 2. Parse command-line options. const char *mesh_file = "cylinder-hex.mesh"; int ser_ref_levels = 0; int par_ref_levels = 0; int order = 2; int ode_solver_type = 1; double t_final = 100.0; double dt = 0.5; double amp = 2.0; double mu = 1.0; double sigma = 2.0*M_PI*10; double Tcapacity = 1.0; double Tconductivity = 0.01; double alpha = Tconductivity/Tcapacity; double freq = 1.0/60.0; bool visualization = true; bool visit = true; int vis_steps = 1; int gfprint = 0; const char *basename = "Joule"; int amr = 0; int debug = 0; const char *problem = "rod"; OptionsParser args(argc, argv); args.AddOption(&mesh_file, "-m", "--mesh", "Mesh file to use."); args.AddOption(&ser_ref_levels, "-rs", "--refine-serial", "Number of times to refine the mesh uniformly in serial."); args.AddOption(&par_ref_levels, "-rp", "--refine-parallel", "Number of times to refine the mesh uniformly in parallel."); args.AddOption(&order, "-o", "--order", "Order (degree) of the finite elements."); args.AddOption(&ode_solver_type, "-s", "--ode-solver", "ODE solver: 1 - Backward Euler, 2 - SDIRK2, 3 - SDIRK3\n\t." "\t 22 - Mid-Point, 23 - SDIRK23, 34 - SDIRK34."); args.AddOption(&t_final, "-tf", "--t-final", "Final time; start time is 0."); args.AddOption(&dt, "-dt", "--time-step", "Time step."); args.AddOption(&mu, "-mu", "--permeability", "Magnetic permeability coefficient."); args.AddOption(&sigma, "-cnd", "--sigma", "Conductivity coefficient."); args.AddOption(&freq, "-f", "--frequency", "Frequency of oscillation."); args.AddOption(&visualization, "-vis", "--visualization", "-no-vis", "--no-visualization", "Enable or disable GLVis visualization."); args.AddOption(&visit, "-visit", "--visit", "-no-visit", "--no-visit", "Enable or disable VisIt visualization."); args.AddOption(&vis_steps, "-vs", "--visualization-steps", "Visualize every n-th timestep."); args.AddOption(&basename, "-k", "--outputfilename", "Name of the visit dump files"); args.AddOption(&gfprint, "-print", "--print", "Print results (grid functions) to disk."); args.AddOption(&amr, "-amr", "--amr", "Enable AMR"); args.AddOption(&STATIC_COND, "-sc", "--static-condensation", "Enable static condensation"); args.AddOption(&debug, "-debug", "--debug", "Print matrices and vectors to disk"); args.AddOption(&SOLVER_PRINT_LEVEL, "-hl", "--hypre-print-level", "Hypre print level"); args.AddOption(&problem, "-p", "--problem", "Name of problem to run"); args.Parse(); if (!args.Good()) { if (mpi.Root()) { args.PrintUsage(cout); } return 1; } if (mpi.Root()) { args.PrintOptions(cout); } aj_ = amp; mj_ = mu; sj_ = sigma; wj_ = 2.0*M_PI*freq; kj_ = sqrt(0.5*wj_*mj_*sj_); hj_ = alpha; dtj_ = dt; rj_ = 1.0; if (mpi.Root()) { cout << "\nSkin depth sqrt(2.0/(wj*mj*sj)) = " << sqrt(2.0/(wj_*mj_*sj_)) << "\nSkin depth sqrt(2.0*dt/(mj*sj)) = " << sqrt(2.0*dt/(mj_*sj_)) << endl; } // 3. Here material properties are assigned to mesh attributes. This code is // not general, it is assumed the mesh has 3 regions each with a different // integer attribute: 1, 2 or 3. // // The coil problem has three regions: 1) coil, 2) air, 3) the rod. // The rod problem has two regions: 1) rod, 2) air. // // We can use the same material maps for both problems. 
   std::map<int, double> sigmaMap, InvTcondMap, TcapMap, InvTcapMap;
   double sigmaAir;
   double TcondAir;
   double TcapAir;
   if (strcmp(problem,"rod")==0 || strcmp(problem,"coil")==0)
   {
      sigmaAir = 1.0e-6 * sigma;
      TcondAir = 1.0e6 * Tconductivity;
      TcapAir = 1.0 * Tcapacity;
   }
   else
   {
      cerr << "Problem " << problem << " not recognized\n";
      mfem_error();
   }

   if (strcmp(problem,"rod")==0 || strcmp(problem,"coil")==0)
   {
      sigmaMap.insert(pair<int, double>(1, sigma));
      sigmaMap.insert(pair<int, double>(2, sigmaAir));
      sigmaMap.insert(pair<int, double>(3, sigmaAir));

      InvTcondMap.insert(pair<int, double>(1, 1.0/Tconductivity));
      InvTcondMap.insert(pair<int, double>(2, 1.0/TcondAir));
      InvTcondMap.insert(pair<int, double>(3, 1.0/TcondAir));

      TcapMap.insert(pair<int, double>(1, Tcapacity));
      TcapMap.insert(pair<int, double>(2, TcapAir));
      TcapMap.insert(pair<int, double>(3, TcapAir));

      InvTcapMap.insert(pair<int, double>(1, 1.0/Tcapacity));
      InvTcapMap.insert(pair<int, double>(2, 1.0/TcapAir));
      InvTcapMap.insert(pair<int, double>(3, 1.0/TcapAir));
   }
   else
   {
      cerr << "Problem " << problem << " not recognized\n";
      mfem_error();
   }

   // 4. Read the serial mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral and hexahedral
   //    meshes with the same code.
   Mesh *mesh;
   mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();

   // 5. Assign the boundary conditions.
   Array<int> ess_bdr(mesh->bdr_attributes.Max());
   Array<int> thermal_ess_bdr(mesh->bdr_attributes.Max());
   Array<int> poisson_ess_bdr(mesh->bdr_attributes.Max());
   if (strcmp(problem,"coil")==0)
   {
      // BEGIN CODE FOR THE COIL PROBLEM
      // For the coil in a box problem we have surfaces 1) coil end (+),
      // 2) coil end (-), 3) five sides of box, 4) side of box with coil BC

      ess_bdr = 0;
      ess_bdr[0] = 1; // boundary attribute 1 (index 0) is fixed
      ess_bdr[1] = 1; // boundary attribute 2 (index 1) is fixed
      ess_bdr[2] = 1; // boundary attribute 3 (index 2) is fixed
      ess_bdr[3] = 1; // boundary attribute 4 (index 3) is fixed

      // Same as above, but this is for the thermal operator: for the HDiv
      // formulation the essential BC is the flux.
      thermal_ess_bdr = 0;
      thermal_ess_bdr[2] = 1; // boundary attribute 3 (index 2) is fixed

      // Same as above, but this is for the Poisson equation: for the H1
      // formulation the essential BC is the value of Phi.
      poisson_ess_bdr = 0;
      poisson_ess_bdr[0] = 1; // boundary attribute 1 (index 0) is fixed
      poisson_ess_bdr[1] = 1; // boundary attribute 2 (index 1) is fixed
      // END CODE FOR THE COIL PROBLEM
   }
   else if (strcmp(problem,"rod")==0)
   {
      // BEGIN CODE FOR THE STRAIGHT ROD PROBLEM
      // The boundary conditions below are for the straight rod problem.

      ess_bdr = 0;
      ess_bdr[0] = 1; // boundary attribute 1 (index 0) is fixed (front)
      ess_bdr[1] = 1; // boundary attribute 2 (index 1) is fixed (rear)
      ess_bdr[2] = 1; // boundary attribute 3 (index 2) is fixed (outer)

      // Same as above, but this is for the thermal operator. For the HDiv
      // formulation the essential BC is the flux, which is zero on the front
      // and sides. Note the natural BC is T = 0 on the outer surface.
      thermal_ess_bdr = 0;
      thermal_ess_bdr[0] = 1; // boundary attribute 1 (index 0) is fixed (front)
      thermal_ess_bdr[1] = 1; // boundary attribute 2 (index 1) is fixed (rear)

      // Same as above, but this is for the Poisson equation: for the H1
      // formulation the essential BC is the value of Phi.
      poisson_ess_bdr = 0;
      poisson_ess_bdr[0] = 1; // boundary attribute 1 (index 0) is fixed (front)
      poisson_ess_bdr[1] = 1; // boundary attribute 2 (index 1) is fixed (rear)
      // END CODE FOR THE STRAIGHT ROD PROBLEM
   }
   else
   {
      cerr << "Problem " << problem << " not recognized\n";
      mfem_error();
   }

   // The following is required for mesh refinement.
   mesh->EnsureNCMesh();

   // 6. Define the ODE solver used for time integration. Several implicit
   //    methods are available, including singly diagonal implicit Runge-Kutta
   //    (SDIRK).
   ODESolver *ode_solver;
   switch (ode_solver_type)
   {
      // Implicit L-stable methods
      case 1:  ode_solver = new BackwardEulerSolver; break;
      case 2:  ode_solver = new SDIRK23Solver(2); break;
      case 3:  ode_solver = new SDIRK33Solver; break;
      // Implicit A-stable methods (not L-stable)
      case 22: ode_solver = new ImplicitMidpointSolver; break;
      case 23: ode_solver = new SDIRK23Solver; break;
      case 34: ode_solver = new SDIRK34Solver; break;
      default:
         if (mpi.Root())
         {
            cout << "Unknown ODE solver type: " << ode_solver_type << '\n';
         }
         delete mesh;
         return 3;
   }

   // 7. Refine the mesh in serial to increase the resolution. In this example
   //    we do 'ser_ref_levels' of uniform refinement, where 'ser_ref_levels'
   //    is a command-line parameter.
   for (int lev = 0; lev < ser_ref_levels; lev++)
   {
      mesh->UniformRefinement();
   }

   // 8. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   for (int lev = 0; lev < par_ref_levels; lev++)
   {
      pmesh->UniformRefinement();
   }
   // Make sure tet-only meshes are marked for local refinement.
   pmesh->Finalize(true);

   // 9. Apply non-uniform non-conforming mesh refinement to the mesh. The
   //    whole metal region is refined once, before the start of the time
   //    loop, i.e. this is not based on any error estimator.
   if (amr == 1)
   {
      Array<int> ref_list;
      int numElems = pmesh->GetNE();
      for (int ielem = 0; ielem < numElems; ielem++)
      {
         int thisAtt = pmesh->GetAttribute(ielem);
         if (thisAtt == 1)
         {
            ref_list.Append(ielem);
         }
      }
      pmesh->GeneralRefinement(ref_list);
      ref_list.DeleteAll();
   }

   // 10. Reorient the mesh. Must be done after refinement but before the
   //     definition of higher-order Nedelec spaces.
   pmesh->ReorientTetMesh();

   // 11. Rebalance the mesh. Since the mesh was adaptively refined in a
   //     non-uniform way it will be computationally unbalanced.
   if (pmesh->Nonconforming())
   {
      pmesh->Rebalance();
   }

   // 12. Define the parallel finite element spaces. We use:
   //
   //     H(curl) for electric field,
   //     H(div) for magnetic flux,
   //     H(div) for thermal flux,
   //     H(grad)/H1 for electrostatic potential,
   //     L2 for temperature

   // L2 contains discontinuous "cell-center" finite elements, type 2 is
   // "positive"
   L2_FECollection L2FEC(order-1, dim);

   // ND contains Nedelec "edge-centered" vector finite elements with
   // continuous tangential component.
   ND_FECollection HCurlFEC(order, dim);

   // RT contains Raviart-Thomas "face-centered" vector finite elements with
   // continuous normal component.
   RT_FECollection HDivFEC(order-1, dim);

   // H1 contains continuous "node-centered" Lagrange finite elements.
H1_FECollection HGradFEC(order, dim); ParFiniteElementSpace L2FESpace(pmesh, &L2FEC); ParFiniteElementSpace HCurlFESpace(pmesh, &HCurlFEC); ParFiniteElementSpace HDivFESpace(pmesh, &HDivFEC); ParFiniteElementSpace HGradFESpace(pmesh, &HGradFEC); // The terminology is TrueVSize is the unique (non-redundant) number of dofs HYPRE_Int glob_size_l2 = L2FESpace.GlobalTrueVSize(); HYPRE_Int glob_size_nd = HCurlFESpace.GlobalTrueVSize(); HYPRE_Int glob_size_rt = HDivFESpace.GlobalTrueVSize(); HYPRE_Int glob_size_h1 = HGradFESpace.GlobalTrueVSize(); if (mpi.Root()) { cout << "Number of Temperature Flux unknowns: " << glob_size_rt << endl; cout << "Number of Temperature unknowns: " << glob_size_l2 << endl; cout << "Number of Electric Field unknowns: " << glob_size_nd << endl; cout << "Number of Magnetic Field unknowns: " << glob_size_rt << endl; cout << "Number of Electrostatic unknowns: " << glob_size_h1 << endl; } int Vsize_l2 = L2FESpace.GetVSize(); int Vsize_nd = HCurlFESpace.GetVSize(); int Vsize_rt = HDivFESpace.GetVSize(); int Vsize_h1 = HGradFESpace.GetVSize(); // the big BlockVector stores the fields as // 0 Temperature // 1 Temperature Flux // 2 P field // 3 E field // 4 B field // 5 Joule Heating Array<int> true_offset(7); true_offset[0] = 0; true_offset[1] = true_offset[0] + Vsize_l2; true_offset[2] = true_offset[1] + Vsize_rt; true_offset[3] = true_offset[2] + Vsize_h1; true_offset[4] = true_offset[3] + Vsize_nd; true_offset[5] = true_offset[4] + Vsize_rt; true_offset[6] = true_offset[5] + Vsize_l2; // The BlockVector is a large contiguous chunk of memory for storing required // data for the hypre vectors, in this case: the temperature L2, the T-flux // HDiv, the E-field HCurl, and the B-field HDiv, and scalar potential P. BlockVector F(true_offset); // grid functions E, B, T, F, P, and w which is the Joule heating ParGridFunction E_gf, B_gf, T_gf, F_gf, w_gf, P_gf; T_gf.MakeRef(&L2FESpace,F, true_offset[0]); F_gf.MakeRef(&HDivFESpace,F, true_offset[1]); P_gf.MakeRef(&HGradFESpace,F,true_offset[2]); E_gf.MakeRef(&HCurlFESpace,F,true_offset[3]); B_gf.MakeRef(&HDivFESpace,F, true_offset[4]); w_gf.MakeRef(&L2FESpace,F, true_offset[5]); // 13. Get the boundary conditions, set up the exact solution grid functions // These VectorCoefficients have an Eval function. Note that e_exact and // b_exact in this case are exact analytical solutions, taking a 3-vector // point as input and returning a 3-vector field VectorFunctionCoefficient E_exact(3, e_exact); VectorFunctionCoefficient B_exact(3, b_exact); FunctionCoefficient T_exact(t_exact); E_exact.SetTime(0.0); B_exact.SetTime(0.0); // 14. Initialize the Diffusion operator, the GLVis visualization and print // the initial energies. 
MagneticDiffusionEOperator oper(true_offset[6], L2FESpace, HCurlFESpace, HDivFESpace, HGradFESpace, ess_bdr, thermal_ess_bdr, poisson_ess_bdr, mu, sigmaMap, TcapMap, InvTcapMap, InvTcondMap); // This function initializes all the fields to zero or some provided IC oper.Init(F); socketstream vis_T, vis_E, vis_B, vis_w, vis_P; char vishost[] = "localhost"; int visport = 19916; if (visualization) { // Make sure all ranks have sent their 'v' solution before initiating // another set of GLVis connections (one from each rank): MPI_Barrier(pmesh->GetComm()); vis_T.precision(8); vis_E.precision(8); vis_B.precision(8); vis_P.precision(8); vis_w.precision(8); int Wx = 0, Wy = 0; // window position int Ww = 350, Wh = 350; // window size int offx = Ww+10, offy = Wh+45; // window offsets miniapps::VisualizeField(vis_P, vishost, visport, P_gf, "Electric Potential (Phi)", Wx, Wy, Ww, Wh); Wx += offx; miniapps::VisualizeField(vis_E, vishost, visport, E_gf, "Electric Field (E)", Wx, Wy, Ww, Wh); Wx += offx; miniapps::VisualizeField(vis_B, vishost, visport, B_gf, "Magnetic Field (B)", Wx, Wy, Ww, Wh); Wx = 0; Wy += offy; miniapps::VisualizeField(vis_w, vishost, visport, w_gf, "Joule Heating", Wx, Wy, Ww, Wh); Wx += offx; miniapps::VisualizeField(vis_T, vishost, visport, T_gf, "Temperature", Wx, Wy, Ww, Wh); } // VisIt visualization VisItDataCollection visit_dc(basename, pmesh); if ( visit ) { visit_dc.RegisterField("E", &E_gf); visit_dc.RegisterField("B", &B_gf); visit_dc.RegisterField("T", &T_gf); visit_dc.RegisterField("w", &w_gf); visit_dc.RegisterField("Phi", &P_gf); visit_dc.RegisterField("F", &F_gf); visit_dc.SetCycle(0); visit_dc.SetTime(0.0); visit_dc.Save(); } E_exact.SetTime(0.0); B_exact.SetTime(0.0); // 15. Perform time-integration (looping over the time iterations, ti, with a // time-step dt). The object oper is the MagneticDiffusionOperator which // has a Mult() method and an ImplicitSolve() method which are used by // the time integrators. ode_solver->Init(oper); double t = 0.0; bool last_step = false; for (int ti = 1; !last_step; ti++) { if (t + dt >= t_final - dt/2) { last_step = true; } // F is the vector of dofs, t is the current time, and dt is the time step // to advance. ode_solver->Step(F, t, dt); if (debug == 1) { oper.Debug(basename,t); } if (gfprint == 1) { ostringstream T_name, E_name, B_name, F_name, w_name, P_name, mesh_name; T_name << basename << "_" << setfill('0') << setw(6) << t << "_" << "T." << setfill('0') << setw(6) << myid; E_name << basename << "_" << setfill('0') << setw(6) << t << "_" << "E." << setfill('0') << setw(6) << myid; B_name << basename << "_" << setfill('0') << setw(6) << t << "_" << "B." << setfill('0') << setw(6) << myid; F_name << basename << "_" << setfill('0') << setw(6) << t << "_" << "F." << setfill('0') << setw(6) << myid; w_name << basename << "_" << setfill('0') << setw(6) << t << "_" << "w." << setfill('0') << setw(6) << myid; P_name << basename << "_" << setfill('0') << setw(6) << t << "_" << "P." << setfill('0') << setw(6) << myid; mesh_name << basename << "_" << setfill('0') << setw(6) << t << "_" << "mesh." 
                   << setfill('0') << setw(6) << myid;

         ofstream mesh_ofs(mesh_name.str().c_str());
         mesh_ofs.precision(8);
         pmesh->Print(mesh_ofs);
         mesh_ofs.close();

         ofstream T_ofs(T_name.str().c_str());
         T_ofs.precision(8);
         T_gf.Save(T_ofs);
         T_ofs.close();

         ofstream E_ofs(E_name.str().c_str());
         E_ofs.precision(8);
         E_gf.Save(E_ofs);
         E_ofs.close();

         ofstream B_ofs(B_name.str().c_str());
         B_ofs.precision(8);
         B_gf.Save(B_ofs);
         B_ofs.close();

         ofstream F_ofs(F_name.str().c_str());
         F_ofs.precision(8);
         F_gf.Save(F_ofs); // fixed: F_gf was being written to B_ofs
         F_ofs.close();

         ofstream P_ofs(P_name.str().c_str());
         P_ofs.precision(8);
         P_gf.Save(P_ofs);
         P_ofs.close();

         ofstream w_ofs(w_name.str().c_str());
         w_ofs.precision(8);
         w_gf.Save(w_ofs);
         w_ofs.close();
      }

      if (last_step || (ti % vis_steps) == 0)
      {
         double el = oper.ElectricLosses(E_gf);

         if (mpi.Root())
         {
            cout << fixed;
            cout << "step " << setw(6) << ti
                 << ",\tt = " << setw(6) << setprecision(3) << t
                 << ",\tdot(E, J) = " << setprecision(8) << el << endl;
         }

         // Make sure all ranks have sent their 'v' solution before initiating
         // another set of GLVis connections (one from each rank):
         MPI_Barrier(pmesh->GetComm());

         if (visualization)
         {
            int Wx = 0, Wy = 0; // window position
            int Ww = 350, Wh = 350; // window size
            int offx = Ww+10, offy = Wh+45; // window offsets

            miniapps::VisualizeField(vis_P, vishost, visport, P_gf,
                                     "Electric Potential (Phi)",
                                     Wx, Wy, Ww, Wh);
            Wx += offx;

            miniapps::VisualizeField(vis_E, vishost, visport, E_gf,
                                     "Electric Field (E)", Wx, Wy, Ww, Wh);
            Wx += offx;

            miniapps::VisualizeField(vis_B, vishost, visport, B_gf,
                                     "Magnetic Field (B)", Wx, Wy, Ww, Wh);
            Wx = 0;
            Wy += offy;

            miniapps::VisualizeField(vis_w, vishost, visport, w_gf,
                                     "Joule Heating", Wx, Wy, Ww, Wh);
            Wx += offx;

            miniapps::VisualizeField(vis_T, vishost, visport, T_gf,
                                     "Temperature", Wx, Wy, Ww, Wh);
         }

         if (visit)
         {
            visit_dc.SetCycle(ti);
            visit_dc.SetTime(t);
            visit_dc.Save();
         }
      }
   }

   if (visualization)
   {
      vis_T.close();
      vis_E.close();
      vis_B.close();
      vis_w.close();
      vis_P.close();
   }

   // 16. Free the used memory.
   delete ode_solver;
   delete pmesh;

   return 0;
}
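// A small standalone check of the two skin-depth numbers printed at startup
// above: delta = sqrt(2/(w*mu*sigma)) and the time-step variant
// sqrt(2*dt/(mu*sigma)), evaluated with the default parameters from main
// (freq = 1/60, mu = 1, sigma = 2*pi*10, dt = 0.5).
#include <cmath>
#include <cstdio>

int main()
{
   const double freq = 1.0 / 60.0, mu = 1.0, sigma = 2.0 * M_PI * 10.0;
   const double dt = 0.5, w = 2.0 * M_PI * freq;

   printf("skin depth sqrt(2/(w*mu*sigma))  = %g\n",
          sqrt(2.0 / (w * mu * sigma)));
   printf("skin depth sqrt(2*dt/(mu*sigma)) = %g\n",
          sqrt(2.0 * dt / (mu * sigma)));
   return 0;
}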
int main(int argc, char *argv[]) { // 1. Initialize MPI. int num_procs, myid; MPI_Init(&argc, &argv); MPI_Comm_size(MPI_COMM_WORLD, &num_procs); MPI_Comm_rank(MPI_COMM_WORLD, &myid); // 2. Parse command-line options. const char *mesh_file = "../data/beam-tet.mesh"; int ser_ref_levels = 2; int par_ref_levels = 1; int order = 1; int nev = 5; bool visualization = 1; OptionsParser args(argc, argv); args.AddOption(&mesh_file, "-m", "--mesh", "Mesh file to use."); args.AddOption(&ser_ref_levels, "-rs", "--refine-serial", "Number of times to refine the mesh uniformly in serial."); args.AddOption(&par_ref_levels, "-rp", "--refine-parallel", "Number of times to refine the mesh uniformly in parallel."); args.AddOption(&order, "-o", "--order", "Finite element order (polynomial degree) or -1 for" " isoparametric space."); args.AddOption(&nev, "-n", "--num-eigs", "Number of desired eigenmodes."); args.AddOption(&visualization, "-vis", "--visualization", "-no-vis", "--no-visualization", "Enable or disable GLVis visualization."); args.Parse(); if (!args.Good()) { if (myid == 0) { args.PrintUsage(cout); } MPI_Finalize(); return 1; } if (myid == 0) { args.PrintOptions(cout); } // 3. Read the (serial) mesh from the given mesh file on all processors. We // can handle triangular, quadrilateral, tetrahedral, hexahedral, surface // and volume meshes with the same code. Mesh *mesh; ifstream imesh(mesh_file); if (!imesh) { if (myid == 0) { cerr << "\nCan not open mesh file: " << mesh_file << '\n' << endl; } MPI_Finalize(); return 2; } mesh = new Mesh(imesh, 1, 1); imesh.close(); int dim = mesh->Dimension(); // 4. Refine the serial mesh on all processors to increase the resolution. In // this example we do 'ref_levels' of uniform refinement (2 by default, or // specified on the command line with -rs). for (int lev = 0; lev < ser_ref_levels; lev++) { mesh->UniformRefinement(); } // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine // this mesh further in parallel to increase the resolution (1 time by // default, or specified on the command line with -rp). Once the parallel // mesh is defined, the serial mesh can be deleted. ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh); delete mesh; for (int lev = 0; lev < par_ref_levels; lev++) { pmesh->UniformRefinement(); } // 6. Define a parallel finite element space on the parallel mesh. Here we // use the Nedelec finite elements of the specified order. FiniteElementCollection *fec = new ND_FECollection(order, dim); ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec); HYPRE_Int size = fespace->GlobalTrueVSize(); if (myid == 0) { cout << "Number of unknowns: " << size << endl; } // 7. Set up the parallel bilinear forms a(.,.) and m(.,.) on the finite // element space. The first corresponds to the curl curl, while the second // is a simple mass matrix needed on the right hand side of the // generalized eigenvalue problem below. The boundary conditions are // implemented by marking all the boundary attributes from the mesh as // essential. The corresponding degrees of freedom are eliminated with // special values on the diagonal to shift the Dirichlet eigenvalues out // of the computational range. After serial and parallel assembly we // extract the corresponding parallel matrices A and M. 
ConstantCoefficient one(1.0); Array<int> ess_bdr; if (pmesh->bdr_attributes.Size()) { ess_bdr.SetSize(pmesh->bdr_attributes.Max()); ess_bdr = 1; } ParBilinearForm *a = new ParBilinearForm(fespace); a->AddDomainIntegrator(new CurlCurlIntegrator(one)); if (pmesh->bdr_attributes.Size() == 0) { // Add a mass term if the mesh has no boundary, e.g. periodic mesh or // closed surface. a->AddDomainIntegrator(new VectorFEMassIntegrator(one)); } a->Assemble(); a->EliminateEssentialBCDiag(ess_bdr, 1.0); a->Finalize(); ParBilinearForm *m = new ParBilinearForm(fespace); m->AddDomainIntegrator(new VectorFEMassIntegrator(one)); m->Assemble(); // shift the eigenvalue corresponding to eliminated dofs to a large value m->EliminateEssentialBCDiag(ess_bdr, numeric_limits<double>::min()); m->Finalize(); HypreParMatrix *A = a->ParallelAssemble(); HypreParMatrix *M = m->ParallelAssemble(); delete a; delete m; // 8. Define and configure the AME eigensolver and the AMS preconditioner for // A to be used within the solver. Set the matrices which define the // generalized eigenproblem A x = lambda M x. HypreAMS *ams = new HypreAMS(*A,fespace); ams->SetPrintLevel(0); ams->SetSingularProblem(); HypreAME *ame = new HypreAME(MPI_COMM_WORLD); ame->SetNumModes(nev); ame->SetPreconditioner(*ams); ame->SetMaxIter(100); ame->SetTol(1e-8); ame->SetPrintLevel(1); ame->SetMassMatrix(*M); ame->SetOperator(*A); // 9. Compute the eigenmodes and extract the array of eigenvalues. Define a // parallel grid function to represent each of the eigenmodes returned by // the solver. Array<double> eigenvalues; ame->Solve(); ame->GetEigenvalues(eigenvalues); ParGridFunction x(fespace); // 10. Save the refined mesh and the modes in parallel. This output can be // viewed later using GLVis: "glvis -np <np> -m mesh -g mode". { ostringstream mesh_name, mode_name; mesh_name << "mesh." << setfill('0') << setw(6) << myid; ofstream mesh_ofs(mesh_name.str().c_str()); mesh_ofs.precision(8); pmesh->Print(mesh_ofs); for (int i=0; i<nev; i++) { // convert eigenvector from HypreParVector to ParGridFunction x = ame->GetEigenvector(i); mode_name << "mode_" << setfill('0') << setw(2) << i << "." << setfill('0') << setw(6) << myid; ofstream mode_ofs(mode_name.str().c_str()); mode_ofs.precision(8); x.Save(mode_ofs); mode_name.str(""); } } // 11. Send the solution by socket to a GLVis server. if (visualization) { char vishost[] = "localhost"; int visport = 19916; socketstream mode_sock(vishost, visport); mode_sock.precision(8); for (int i=0; i<nev; i++) { if ( myid == 0 ) { cout << "Eigenmode " << i+1 << '/' << nev << ", Lambda = " << eigenvalues[i] << endl; } // convert eigenvector from HypreParVector to ParGridFunction x = ame->GetEigenvector(i); mode_sock << "parallel " << num_procs << " " << myid << "\n" << "solution\n" << *pmesh << x << flush << "window_title 'Eigenmode " << i+1 << '/' << nev << ", Lambda = " << eigenvalues[i] << "'" << endl; char c; if (myid == 0) { cout << "press (q)uit or (c)ontinue --> " << flush; cin >> c; } MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD); if (c != 'c') { break; } } mode_sock.close(); }
#include "mfem.hpp"
#include <fstream>
#include <iostream>

using namespace std;
using namespace mfem;

int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../data/beam-tri.mesh";
   int order = 1;
   int nev = 5;
   bool visualization = 1;
   bool amg_elast = 0;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree).");
   args.AddOption(&nev, "-n", "--num-eigs",
                  "Number of desired eigenmodes.");
   args.AddOption(&amg_elast, "-elast", "--amg-for-elasticity", "-sys",
                  "--amg-for-systems",
                  "Use the special AMG elasticity solver (GM/LN approaches), "
                  "or standard AMG for systems (unknown approach).");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();
   if (mesh->attributes.Max() < 2)
   {
      if (myid == 0)
         cerr << "\nInput mesh should have at least two materials!"
              << " (See schematic in ex12p.cpp)\n" << endl;
      MPI_Finalize();
      return 3;
   }

   // 4. Select the order of the finite element discretization space. For
   //    NURBS meshes, we increase the order by degree elevation.
   if (mesh->NURBSext && order > mesh->NURBSext->GetOrder())
   {
      mesh->DegreeElevate(order - mesh->NURBSext->GetOrder());
   }

   // 5. Refine the serial mesh on all processors to increase the resolution.
   //    In this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 1,000 elements.
   {
      int ref_levels = (int)floor(log(1000./mesh->GetNE())/log(2.)/dim);
      for (int l = 0; l < ref_levels; l++)
      {
         mesh->UniformRefinement();
      }
   }

   // 6. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 1;
      for (int l = 0; l < par_ref_levels; l++)
      {
         pmesh->UniformRefinement();
      }
   }

   // 7. Define a parallel finite element space on the parallel mesh. Here we
   //    use vector finite elements, i.e. dim copies of a scalar finite element
   //    space. We use the ordering by vector dimension (the last argument of
   //    the FiniteElementSpace constructor) which is expected in the systems
   //    version of the BoomerAMG preconditioner. For NURBS meshes, we use the
   //    (degree elevated) NURBS space associated with the mesh nodes.
   FiniteElementCollection *fec;
   ParFiniteElementSpace *fespace;
   const bool use_nodal_fespace = pmesh->NURBSext && !amg_elast;
   if (use_nodal_fespace)
   {
      fec = NULL;
      fespace = (ParFiniteElementSpace *)pmesh->GetNodes()->FESpace();
   }
   else
   {
      fec = new H1_FECollection(order, dim);
      fespace = new ParFiniteElementSpace(pmesh, fec, dim, Ordering::byVDIM);
   }
   HYPRE_Int size = fespace->GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of unknowns: " << size << endl
           << "Assembling: " << flush;
   }

   // 8. Set up the parallel bilinear forms a(.,.) and m(.,.) on the finite
   //    element space corresponding to the linear elasticity integrator with
   //    piece-wise constant coefficients lambda and mu, and a simple mass
   //    matrix needed on the right hand side of the generalized eigenvalue
   //    problem below. The boundary conditions are implemented by marking only
   //    boundary attribute 1 as essential. We use special values on the
   //    diagonal to shift the Dirichlet eigenvalues out of the computational
   //    range. After serial/parallel assembly we extract the corresponding
   //    parallel matrices A and M.
   Vector lambda(pmesh->attributes.Max());
   lambda = 1.0;
   lambda(0) = lambda(1)*50;
   PWConstCoefficient lambda_func(lambda);
   Vector mu(pmesh->attributes.Max());
   mu = 1.0;
   mu(0) = mu(1)*50;
   PWConstCoefficient mu_func(mu);

   Array<int> ess_bdr(pmesh->bdr_attributes.Max());
   ess_bdr = 0;
   ess_bdr[0] = 1;

   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new ElasticityIntegrator(lambda_func, mu_func));
   if (myid == 0)
   {
      cout << "matrix ... " << flush;
   }
   a->Assemble();
   a->EliminateEssentialBCDiag(ess_bdr, 1.0);
   a->Finalize();

   ParBilinearForm *m = new ParBilinearForm(fespace);
   m->AddDomainIntegrator(new VectorMassIntegrator());
   m->Assemble();
   // Shift the eigenvalues corresponding to the eliminated dofs to a large
   // value: the tiny diagonal entries in M turn them into a_ii/m_ii ~
   // 1/DBL_MIN, far outside the computed range.
   m->EliminateEssentialBCDiag(ess_bdr, numeric_limits<double>::min());
   m->Finalize();
   if (myid == 0)
   {
      cout << "done." << endl;
   }

   HypreParMatrix *A = a->ParallelAssemble();
   HypreParMatrix *M = m->ParallelAssemble();

   delete a;
   delete m;

   // 9. Define and configure the LOBPCG eigensolver and the BoomerAMG
   //    preconditioner for A to be used within the solver. Set the matrices
   //    which define the generalized eigenproblem A x = lambda M x.
   HypreBoomerAMG *amg = new HypreBoomerAMG(*A);
   amg->SetPrintLevel(0);
   if (amg_elast)
   {
      amg->SetElasticityOptions(fespace);
   }
   else
   {
      amg->SetSystemsOptions(dim);
   }

   HypreLOBPCG *lobpcg = new HypreLOBPCG(MPI_COMM_WORLD);
   lobpcg->SetNumModes(nev);
   lobpcg->SetPreconditioner(*amg);
   lobpcg->SetMaxIter(100);
   lobpcg->SetTol(1e-8);
   lobpcg->SetPrecondUsageMode(1);
   lobpcg->SetPrintLevel(1);
   lobpcg->SetMassMatrix(*M);
   lobpcg->SetOperator(*A);

   // 10. Compute the eigenmodes and extract the array of eigenvalues. Define a
   //     parallel grid function to represent each of the eigenmodes returned
   //     by the solver.
   Array<double> eigenvalues;
   lobpcg->Solve();
   lobpcg->GetEigenvalues(eigenvalues);
   ParGridFunction x(fespace);

   // 11. For non-NURBS meshes, make the mesh curved based on the finite
   //     element space. This means that we define the mesh elements through a
   //     fespace-based transformation of the reference element. This allows us
   //     to save the displaced mesh as a curved mesh when using a high-order
   //     finite element displacement field. We assume that the initial mesh
   //     (read from the file) is not a higher-order curved mesh compared to
   //     the chosen FE space.
   if (!use_nodal_fespace)
   {
      pmesh->SetNodalFESpace(fespace);
   }

   // 12. Save the refined mesh and the modes in parallel. This output can be
   //     viewed later using GLVis: "glvis -np <np> -m mesh -g mode".
   {
      ostringstream mesh_name, mode_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      for (int i=0; i<nev; i++)
      {
         // convert eigenvector from HypreParVector to ParGridFunction
         x = lobpcg->GetEigenvector(i);

         mode_name << "mode_" << setfill('0') << setw(2) << i << "."
                   << setfill('0') << setw(6) << myid;

         ofstream mode_ofs(mode_name.str().c_str());
         mode_ofs.precision(8);
         x.Save(mode_ofs);
         mode_name.str("");
      }
   }

   // 13. Send the above data by socket to a GLVis server. Use the "n" and "b"
   //     keys in GLVis to visualize the displacements.
   if (visualization)
   {
      char vishost[] = "localhost";
      int visport = 19916;
      socketstream mode_sock(vishost, visport);

      for (int i=0; i<nev; i++)
      {
         if (myid == 0)
         {
            cout << "Eigenmode " << i+1 << '/' << nev
                 << ", Lambda = " << eigenvalues[i] << endl;
         }

         // convert eigenvector from HypreParVector to ParGridFunction
         x = lobpcg->GetEigenvector(i);

         mode_sock << "parallel " << num_procs << " " << myid << "\n"
                   << "solution\n" << *pmesh << x << flush
                   << "window_title 'Eigenmode " << i+1 << '/' << nev
                   << ", Lambda = " << eigenvalues[i] << "'" << endl;

         char c;
         if (myid == 0)
         {
            cout << "press (q)uit or (c)ontinue --> " << flush;
            cin >> c;
         }
         MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);

         if (c != 'c')
         {
            break;
         }
      }
      mode_sock.close();
   }
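   // 14. Free the used memory. (A sketch of the standard closing block,
   //     completing the function above. In the nodal-fespace case the FE space
   //     is owned by the mesh nodes, so it is deleted only when 'fec' was
   //     allocated here.)
   delete lobpcg;
   delete amg;
   delete M;
   delete A;

   if (fec)
   {
      delete fespace;
      delete fec;
   }
   delete pmesh;

   MPI_Finalize();

   return 0;
}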