// Compute the gradient (Jacobian) of the nonlinear backward Euler residual:
// J = M + dt S + dt^2 grad H(z), evaluated at z = x + dt (v + dt k).
Operator &BackwardEulerOperator::GetGradient(const Vector &k) const
{
   delete Jacobian;
   SparseMatrix *localJ = Add(1.0, M->SpMat(), dt, S->SpMat());
   add(*v, dt, k, w);
   add(*x, dt, w, z);
   localJ->Add(dt*dt, H->GetLocalGradient(z));
   Jacobian = M->ParallelAssemble(localJ);
   delete localJ;
   return *Jacobian;
}
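// A minimal, hedged sketch of how a Newton-type solver typically consumes an
// operator like this one. The names "oper" (a BackwardEulerOperator that
// already holds dt, x and v), "newton_solver" (an mfem::NewtonSolver with a
// linear solver attached via SetSolver) and "dv_dt" are illustrative, not
// part of the code above:
newton_solver.SetOperator(oper);   // Mult() -> residual, GetGradient() -> Jacobian
Vector zero;                       // an empty vector is treated as a zero r.h.s.
newton_solver.Mult(zero, dv_dt);   // each Newton iteration calls oper.GetGradient(k)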
int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../data/beam-tet.mesh";
   int order = 1;
   bool visualization = 1;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree).");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
         args.PrintUsage(cout);
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
      args.PrintOptions(cout);

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh;
   ifstream imesh(mesh_file);
   if (!imesh)
   {
      if (myid == 0)
         cerr << "\nCannot open mesh file: " << mesh_file << '\n' << endl;
      MPI_Finalize();
      return 2;
   }
   mesh = new Mesh(imesh, 1, 1);
   imesh.close();
   int dim = mesh->Dimension();
   if (dim != 3)
   {
      if (myid == 0)
         cerr << "\nThis example requires a 3D mesh\n" << endl;
      MPI_Finalize();
      return 3;
   }

   // 4. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 1,000 elements.
   {
      int ref_levels =
         (int)floor(log(1000./mesh->GetNE())/log(2.)/dim);
      for (int l = 0; l < ref_levels; l++)
         mesh->UniformRefinement();
   }

   // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted. Tetrahedral
   //    meshes need to be reoriented before we can define high-order Nedelec
   //    spaces on them.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 2;
      for (int l = 0; l < par_ref_levels; l++)
         pmesh->UniformRefinement();
   }
   pmesh->ReorientTetMesh();

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use the lowest order Nedelec finite elements, but we can easily switch
   //    to higher-order spaces by changing the value of order.
   FiniteElementCollection *fec = new ND_FECollection(order, dim);
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   int size = fespace->GlobalTrueVSize();
   if (myid == 0)
      cout << "Number of unknowns: " << size << endl;

   // 7. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system, which in this case is
   //    (f,phi_i) where f is given by the function f_exact and phi_i are the
   //    basis functions in the finite element fespace.
   VectorFunctionCoefficient f(3, f_exact);
   ParLinearForm *b = new ParLinearForm(fespace);
   b->AddDomainIntegrator(new VectorFEDomainLFIntegrator(f));
   b->Assemble();

   // 8. Define the solution vector x as a parallel finite element grid function
   //    corresponding to fespace. Initialize x by projecting the exact
   //    solution. Note that only values from the boundary edges will be used
   //    when eliminating the non-homogeneous boundary condition to modify the
   //    r.h.s. vector b.
   ParGridFunction x(fespace);
   VectorFunctionCoefficient E(3, E_exact);
   x.ProjectCoefficient(E);

   // 9. Set up the parallel bilinear form corresponding to the EM diffusion
   //    operator curl muinv curl + sigma I, by adding the curl-curl and the
   //    mass domain integrators and finally imposing non-homogeneous Dirichlet
   //    boundary conditions. The boundary conditions are implemented by
   //    marking all the boundary attributes from the mesh as essential
   //    (Dirichlet). After serial and parallel assembly we extract the
   //    parallel matrix A.
   Coefficient *muinv = new ConstantCoefficient(1.0);
   Coefficient *sigma = new ConstantCoefficient(1.0);
   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new CurlCurlIntegrator(*muinv));
   a->AddDomainIntegrator(new VectorFEMassIntegrator(*sigma));
   a->Assemble();
   Array<int> ess_bdr(pmesh->bdr_attributes.Max());
   ess_bdr = 1;
   a->EliminateEssentialBC(ess_bdr, x, *b);
   a->Finalize();

   // 10. Define the parallel (hypre) matrix and vectors representing a(.,.),
   //     b(.) and the finite element approximation.
   HypreParMatrix *A = a->ParallelAssemble();
   HypreParVector *B = b->ParallelAssemble();
   HypreParVector *X = x.ParallelAverage();
   *X = 0.0;

   delete a;
   delete sigma;
   delete muinv;
   delete b;

   // 11. Define and apply a parallel PCG solver for AX=B with the AMS
   //     preconditioner from hypre.
   HypreSolver *ams = new HypreAMS(*A, fespace);
   HyprePCG *pcg = new HyprePCG(*A);
   pcg->SetTol(1e-12);
   pcg->SetMaxIter(500);
   pcg->SetPrintLevel(2);
   pcg->SetPreconditioner(*ams);
   pcg->Mult(*B, *X);

   // 12. Extract the parallel grid function corresponding to the finite element
   //     approximation X. This is the local solution on each processor.
   x = *X;

   // 13. Compute and print the L^2 norm of the error.
   {
      double err = x.ComputeL2Error(E);
      if (myid == 0)
         cout << "\n|| E_h - E ||_{L^2} = " << err << '\n' << endl;
   }

   // 14. Save the refined mesh and the solution in parallel. This output can
   //     be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
   {
      ostringstream mesh_name, sol_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      sol_name << "sol." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream sol_ofs(sol_name.str().c_str());
      sol_ofs.precision(8);
      x.Save(sol_ofs);
   }

   // 15. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int  visport   = 19916;
      socketstream sol_sock(vishost, visport);
      sol_sock << "parallel " << num_procs << " " << myid << "\n";
      sol_sock.precision(8);
      sol_sock << "solution\n" << *pmesh << x << flush;
   }

   // 16. Free the used memory.
   delete pcg;
   delete ams;
   delete X;
   delete B;
   delete A;
   delete fespace;
   delete fec;
   delete pmesh;

   MPI_Finalize();

   return 0;
}
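// The example above references E_exact and f_exact, which are declared before
// main() and defined elsewhere in the file (not shown in this excerpt). A
// hedged, purely illustrative pair that is consistent with the solved problem
// curl(muinv curl E) + sigma E = f for muinv = sigma = 1 is sketched below;
// kappa is an assumed constant and the file's actual definitions may differ:
const double kappa = M_PI;

void E_exact(const Vector &p, Vector &E)
{
   E(0) = sin(kappa * p(1));
   E(1) = sin(kappa * p(2));
   E(2) = sin(kappa * p(0));
}

void f_exact(const Vector &p, Vector &f)
{
   // For the field above, curl curl E = kappa^2 E, so f = (1 + kappa^2) E.
   f(0) = (1. + kappa * kappa) * sin(kappa * p(1));
   f(1) = (1. + kappa * kappa) * sin(kappa * p(2));
   f(2) = (1. + kappa * kappa) * sin(kappa * p(0));
}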
double L2ZZErrorEstimator(BilinearFormIntegrator &flux_integrator,
                          const ParGridFunction &x,
                          ParFiniteElementSpace &smooth_flux_fes,
                          ParFiniteElementSpace &flux_fes,
                          Vector &errors, int norm_p, double solver_tol,
                          int solver_max_it)
{
   // Compute fluxes in discontinuous space
   GridFunction flux(&flux_fes);
   flux = 0.0;

   ParFiniteElementSpace *xfes = x.ParFESpace();
   Array<int> xdofs, fdofs;
   Vector el_x, el_f;

   for (int i = 0; i < xfes->GetNE(); i++)
   {
      xfes->GetElementVDofs(i, xdofs);
      x.GetSubVector(xdofs, el_x);

      ElementTransformation *Transf = xfes->GetElementTransformation(i);
      flux_integrator.ComputeElementFlux(*xfes->GetFE(i), *Transf, el_x,
                                         *flux_fes.GetFE(i), el_f, false);

      flux_fes.GetElementVDofs(i, fdofs);
      flux.AddElementVector(fdofs, el_f);
   }

   // Assemble the linear system for L2 projection into the "smooth" space
   ParBilinearForm *a = new ParBilinearForm(&smooth_flux_fes);
   ParLinearForm *b = new ParLinearForm(&smooth_flux_fes);
   VectorGridFunctionCoefficient f(&flux);

   if (xfes->GetNE())
   {
      if (smooth_flux_fes.GetFE(0)->GetRangeType() == FiniteElement::SCALAR)
      {
         VectorMassIntegrator *vmass = new VectorMassIntegrator;
         vmass->SetVDim(smooth_flux_fes.GetVDim());
         a->AddDomainIntegrator(vmass);
         b->AddDomainIntegrator(new VectorDomainLFIntegrator(f));
      }
      else
      {
         a->AddDomainIntegrator(new VectorFEMassIntegrator);
         b->AddDomainIntegrator(new VectorFEDomainLFIntegrator(f));
      }
   }

   b->Assemble();
   a->Assemble();
   a->Finalize();

   // The destination of the projected discontinuous flux
   ParGridFunction smooth_flux(&smooth_flux_fes);
   smooth_flux = 0.0;

   HypreParMatrix *A = a->ParallelAssemble();
   HypreParVector *B = b->ParallelAssemble();
   HypreParVector *X = smooth_flux.ParallelProject();

   delete a;
   delete b;

   // Define and apply a parallel PCG solver for AX=B with the BoomerAMG
   // preconditioner from hypre.
   HypreBoomerAMG *amg = new HypreBoomerAMG(*A);
   amg->SetPrintLevel(0);
   HyprePCG *pcg = new HyprePCG(*A);
   pcg->SetTol(solver_tol);
   pcg->SetMaxIter(solver_max_it);
   pcg->SetPrintLevel(0);
   pcg->SetPreconditioner(*amg);
   pcg->Mult(*B, *X);

   // Extract the parallel grid function corresponding to the finite element
   // approximation X. This is the local solution on each processor.
   smooth_flux = *X;

   delete A;
   delete B;
   delete X;
   delete amg;
   delete pcg;

   // Proceed through the elements one by one, and find the Lp norm differences
   // between the flux as computed per element and the flux projected onto the
   // smooth_flux_fes space.
   double total_error = 0.0;
   errors.SetSize(xfes->GetNE());
   for (int i = 0; i < xfes->GetNE(); i++)
   {
      errors(i) = ComputeElementLpDistance(norm_p, i, smooth_flux, flux);
      total_error += pow(errors(i), norm_p);
   }

   double glob_error;
   MPI_Allreduce(&total_error, &glob_error, 1, MPI_DOUBLE, MPI_SUM,
                 xfes->GetComm());

   return pow(glob_error, 1.0/norm_p);
}
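// Hedged usage sketch for the estimator above, following the pattern of
// MFEM's Laplace examples. It assumes an existing ParMesh *pmesh, an integer
// "order", a ParGridFunction x from an order-"order" H1 space, and a
// ConstantCoefficient "one"; all of these names are illustrative and not part
// of the function's interface:
DiffusionIntegrator flux_integrator(one);
L2_FECollection flux_fec(order, pmesh->Dimension());
ParFiniteElementSpace flux_fes(pmesh, &flux_fec, pmesh->SpaceDimension());
RT_FECollection smooth_flux_fec(order - 1, pmesh->Dimension());
ParFiniteElementSpace smooth_flux_fes(pmesh, &smooth_flux_fec);
Vector errors;
double total_err = L2ZZErrorEstimator(flux_integrator, x, smooth_flux_fes,
                                      flux_fes, errors, 2, 1e-12, 200);
// errors(i) now holds the local indicator for element i, and with norm_p = 2
// total_err is the square root of the sum of the squared element terms.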
int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../data/beam-tet.mesh";
   int ser_ref_levels = 2;
   int par_ref_levels = 1;
   int order = 1;
   int nev = 5;
   bool visualization = 1;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&ser_ref_levels, "-rs", "--refine-serial",
                  "Number of times to refine the mesh uniformly in serial.");
   args.AddOption(&par_ref_levels, "-rp", "--refine-parallel",
                  "Number of times to refine the mesh uniformly in parallel.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree) or -1 for"
                  " isoparametric space.");
   args.AddOption(&nev, "-n", "--num-eigs",
                  "Number of desired eigenmodes.");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh;
   ifstream imesh(mesh_file);
   if (!imesh)
   {
      if (myid == 0)
      {
         cerr << "\nCannot open mesh file: " << mesh_file << '\n' << endl;
      }
      MPI_Finalize();
      return 2;
   }
   mesh = new Mesh(imesh, 1, 1);
   imesh.close();
   int dim = mesh->Dimension();

   // 4. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement (2 by default, or
   //    specified on the command line with -rs).
   for (int lev = 0; lev < ser_ref_levels; lev++)
   {
      mesh->UniformRefinement();
   }

   // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution (1 time by
   //    default, or specified on the command line with -rp). Once the parallel
   //    mesh is defined, the serial mesh can be deleted.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   for (int lev = 0; lev < par_ref_levels; lev++)
   {
      pmesh->UniformRefinement();
   }

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use the Nedelec finite elements of the specified order.
   FiniteElementCollection *fec = new ND_FECollection(order, dim);
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   HYPRE_Int size = fespace->GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of unknowns: " << size << endl;
   }

   // 7. Set up the parallel bilinear forms a(.,.) and m(.,.) on the finite
   //    element space. The first corresponds to the curl curl, while the second
   //    is a simple mass matrix needed on the right hand side of the
   //    generalized eigenvalue problem below. The boundary conditions are
   //    implemented by marking all the boundary attributes from the mesh as
   //    essential. The corresponding degrees of freedom are eliminated with
   //    special values on the diagonal to shift the Dirichlet eigenvalues out
   //    of the computational range. After serial and parallel assembly we
   //    extract the corresponding parallel matrices A and M.
   ConstantCoefficient one(1.0);
   Array<int> ess_bdr;
   if (pmesh->bdr_attributes.Size())
   {
      ess_bdr.SetSize(pmesh->bdr_attributes.Max());
      ess_bdr = 1;
   }

   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new CurlCurlIntegrator(one));
   if (pmesh->bdr_attributes.Size() == 0)
   {
      // Add a mass term if the mesh has no boundary, e.g. periodic mesh or
      // closed surface.
      a->AddDomainIntegrator(new VectorFEMassIntegrator(one));
   }
   a->Assemble();
   a->EliminateEssentialBCDiag(ess_bdr, 1.0);
   a->Finalize();

   ParBilinearForm *m = new ParBilinearForm(fespace);
   m->AddDomainIntegrator(new VectorFEMassIntegrator(one));
   m->Assemble();
   // shift the eigenvalue corresponding to eliminated dofs to a large value
   m->EliminateEssentialBCDiag(ess_bdr, numeric_limits<double>::min());
   m->Finalize();

   HypreParMatrix *A = a->ParallelAssemble();
   HypreParMatrix *M = m->ParallelAssemble();

   delete a;
   delete m;

   // 8. Define and configure the AME eigensolver and the AMS preconditioner for
   //    A to be used within the solver. Set the matrices which define the
   //    generalized eigenproblem A x = lambda M x.
   HypreAMS *ams = new HypreAMS(*A, fespace);
   ams->SetPrintLevel(0);
   ams->SetSingularProblem();

   HypreAME *ame = new HypreAME(MPI_COMM_WORLD);
   ame->SetNumModes(nev);
   ame->SetPreconditioner(*ams);
   ame->SetMaxIter(100);
   ame->SetTol(1e-8);
   ame->SetPrintLevel(1);
   ame->SetMassMatrix(*M);
   ame->SetOperator(*A);

   // 9. Compute the eigenmodes and extract the array of eigenvalues. Define a
   //    parallel grid function to represent each of the eigenmodes returned by
   //    the solver.
   Array<double> eigenvalues;
   ame->Solve();
   ame->GetEigenvalues(eigenvalues);
   ParGridFunction x(fespace);

   // 10. Save the refined mesh and the modes in parallel. This output can be
   //     viewed later using GLVis: "glvis -np <np> -m mesh -g mode".
   {
      ostringstream mesh_name, mode_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      for (int i=0; i<nev; i++)
      {
         // convert eigenvector from HypreParVector to ParGridFunction
         x = ame->GetEigenvector(i);

         mode_name << "mode_" << setfill('0') << setw(2) << i << "."
                   << setfill('0') << setw(6) << myid;

         ofstream mode_ofs(mode_name.str().c_str());
         mode_ofs.precision(8);
         x.Save(mode_ofs);
         mode_name.str("");
      }
   }

   // 11. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int  visport   = 19916;
      socketstream mode_sock(vishost, visport);
      mode_sock.precision(8);

      for (int i=0; i<nev; i++)
      {
         if ( myid == 0 )
         {
            cout << "Eigenmode " << i+1 << '/' << nev
                 << ", Lambda = " << eigenvalues[i] << endl;
         }

         // convert eigenvector from HypreParVector to ParGridFunction
         x = ame->GetEigenvector(i);

         mode_sock << "parallel " << num_procs << " " << myid << "\n"
                   << "solution\n" << *pmesh << x << flush
                   << "window_title 'Eigenmode " << i+1 << '/' << nev
                   << ", Lambda = " << eigenvalues[i] << "'" << endl;

         char c;
         if (myid == 0)
         {
            cout << "press (q)uit or (c)ontinue --> " << flush;
            cin >> c;
         }
         MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);

         if (c != 'c')
         {
            break;
         }
      }
      mode_sock.close();
   }
int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../data/beam-tri.mesh";
   int order = 1;
   int nev = 5;
   bool visualization = 1;
   bool amg_elast = 0;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree).");
   args.AddOption(&nev, "-n", "--num-eigs",
                  "Number of desired eigenmodes.");
   args.AddOption(&amg_elast, "-elast", "--amg-for-elasticity", "-sys",
                  "--amg-for-systems",
                  "Use the special AMG elasticity solver (GM/LN approaches), "
                  "or standard AMG for systems (unknown approach).");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh = new Mesh(mesh_file, 1, 1);
   int dim = mesh->Dimension();

   if (mesh->attributes.Max() < 2)
   {
      if (myid == 0)
         cerr << "\nInput mesh should have at least two materials!"
              << " (See schematic in ex12p.cpp)\n" << endl;
      MPI_Finalize();
      return 3;
   }

   // 4. Select the order of the finite element discretization space. For NURBS
   //    meshes, we increase the order by degree elevation.
   if (mesh->NURBSext && order > mesh->NURBSext->GetOrder())
   {
      mesh->DegreeElevate(order - mesh->NURBSext->GetOrder());
   }

   // 5. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 1,000 elements.
   {
      int ref_levels =
         (int)floor(log(1000./mesh->GetNE())/log(2.)/dim);
      for (int l = 0; l < ref_levels; l++)
      {
         mesh->UniformRefinement();
      }
   }

   // 6. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 1;
      for (int l = 0; l < par_ref_levels; l++)
      {
         pmesh->UniformRefinement();
      }
   }

   // 7. Define a parallel finite element space on the parallel mesh. Here we
   //    use vector finite elements, i.e. dim copies of a scalar finite element
   //    space. We use the ordering by vector dimension (the last argument of
   //    the FiniteElementSpace constructor) which is expected in the systems
   //    version of BoomerAMG preconditioner. For NURBS meshes, we use the
   //    (degree elevated) NURBS space associated with the mesh nodes.
   FiniteElementCollection *fec;
   ParFiniteElementSpace *fespace;
   const bool use_nodal_fespace = pmesh->NURBSext && !amg_elast;
   if (use_nodal_fespace)
   {
      fec = NULL;
      fespace = (ParFiniteElementSpace *)pmesh->GetNodes()->FESpace();
   }
   else
   {
      fec = new H1_FECollection(order, dim);
      fespace = new ParFiniteElementSpace(pmesh, fec, dim, Ordering::byVDIM);
   }
   HYPRE_Int size = fespace->GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of unknowns: " << size << endl
           << "Assembling: " << flush;
   }

   // 8. Set up the parallel bilinear forms a(.,.) and m(.,.) on the finite
   //    element space, corresponding to the linear elasticity integrator with
   //    piecewise constant coefficients lambda and mu, and a simple mass matrix
   //    needed on the right hand side of the generalized eigenvalue problem
   //    below. The boundary conditions are implemented by marking only boundary
   //    attribute 1 as essential. We use special values on the diagonal to
   //    shift the Dirichlet eigenvalues out of the computational range. After
   //    serial/parallel assembly we extract the corresponding parallel matrices
   //    A and M.
   Vector lambda(pmesh->attributes.Max());
   lambda = 1.0;
   lambda(0) = lambda(1)*50;
   PWConstCoefficient lambda_func(lambda);
   Vector mu(pmesh->attributes.Max());
   mu = 1.0;
   mu(0) = mu(1)*50;
   PWConstCoefficient mu_func(mu);

   Array<int> ess_bdr(pmesh->bdr_attributes.Max());
   ess_bdr = 0;
   ess_bdr[0] = 1;

   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new ElasticityIntegrator(lambda_func, mu_func));
   if (myid == 0)
   {
      cout << "matrix ... " << flush;
   }
   a->Assemble();
   a->EliminateEssentialBCDiag(ess_bdr, 1.0);
   a->Finalize();

   ParBilinearForm *m = new ParBilinearForm(fespace);
   m->AddDomainIntegrator(new VectorMassIntegrator());
   m->Assemble();
   // shift the eigenvalue corresponding to eliminated dofs to a large value
   m->EliminateEssentialBCDiag(ess_bdr, numeric_limits<double>::min());
   m->Finalize();
   if (myid == 0)
   {
      cout << "done." << endl;
   }

   HypreParMatrix *A = a->ParallelAssemble();
   HypreParMatrix *M = m->ParallelAssemble();

   delete a;
   delete m;

   // 9. Define and configure the LOBPCG eigensolver and the BoomerAMG
   //    preconditioner for A to be used within the solver. Set the matrices
   //    which define the generalized eigenproblem A x = lambda M x.
   HypreBoomerAMG *amg = new HypreBoomerAMG(*A);
   amg->SetPrintLevel(0);
   if (amg_elast)
   {
      amg->SetElasticityOptions(fespace);
   }
   else
   {
      amg->SetSystemsOptions(dim);
   }

   HypreLOBPCG *lobpcg = new HypreLOBPCG(MPI_COMM_WORLD);
   lobpcg->SetNumModes(nev);
   lobpcg->SetPreconditioner(*amg);
   lobpcg->SetMaxIter(100);
   lobpcg->SetTol(1e-8);
   lobpcg->SetPrecondUsageMode(1);
   lobpcg->SetPrintLevel(1);
   lobpcg->SetMassMatrix(*M);
   lobpcg->SetOperator(*A);

   // 10. Compute the eigenmodes and extract the array of eigenvalues. Define a
   //     parallel grid function to represent each of the eigenmodes returned by
   //     the solver.
   Array<double> eigenvalues;
   lobpcg->Solve();
   lobpcg->GetEigenvalues(eigenvalues);
   ParGridFunction x(fespace);

   // 11. For non-NURBS meshes, make the mesh curved based on the finite element
   //     space. This means that we define the mesh elements through a fespace
   //     based transformation of the reference element. This allows us to save
   //     the displaced mesh as a curved mesh when using a high-order finite
   //     element displacement field. We assume that the initial mesh (read from
   //     the file) is not a higher-order curved mesh compared to the chosen FE
   //     space.
   if (!use_nodal_fespace)
   {
      pmesh->SetNodalFESpace(fespace);
   }

   // 12. Save the refined mesh and the modes in parallel. This output can be
   //     viewed later using GLVis: "glvis -np <np> -m mesh -g mode".
   {
      ostringstream mesh_name, mode_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      for (int i=0; i<nev; i++)
      {
         // convert eigenvector from HypreParVector to ParGridFunction
         x = lobpcg->GetEigenvector(i);

         mode_name << "mode_" << setfill('0') << setw(2) << i << "."
                   << setfill('0') << setw(6) << myid;

         ofstream mode_ofs(mode_name.str().c_str());
         mode_ofs.precision(8);
         x.Save(mode_ofs);
         mode_name.str("");
      }
   }

   // 13. Send the above data by socket to a GLVis server. Use the "n" and "b"
   //     keys in GLVis to visualize the displacements.
   if (visualization)
   {
      char vishost[] = "localhost";
      int  visport   = 19916;
      socketstream mode_sock(vishost, visport);

      for (int i=0; i<nev; i++)
      {
         if ( myid == 0 )
         {
            cout << "Eigenmode " << i+1 << '/' << nev
                 << ", Lambda = " << eigenvalues[i] << endl;
         }

         // convert eigenvector from HypreParVector to ParGridFunction
         x = lobpcg->GetEigenvector(i);

         mode_sock << "parallel " << num_procs << " " << myid << "\n"
                   << "solution\n" << *pmesh << x << flush
                   << "window_title 'Eigenmode " << i+1 << '/' << nev
                   << ", Lambda = " << eigenvalues[i] << "'" << endl;

         char c;
         if (myid == 0)
         {
            cout << "press (q)uit or (c)ontinue --> " << flush;
            cin >> c;
         }
         MPI_Bcast(&c, 1, MPI_CHAR, 0, MPI_COMM_WORLD);

         if (c != 'c')
         {
            break;
         }
      }
      mode_sock.close();
   }