int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   const char *mesh_file = "../data/beam-tet.mesh";
   int order = 1;
   bool visualization = 1;

   OptionsParser args(argc, argv);
   args.AddOption(&mesh_file, "-m", "--mesh",
                  "Mesh file to use.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree).");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
         args.PrintUsage(cout);
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
      args.PrintOptions(cout);

   // 3. Read the (serial) mesh from the given mesh file on all processors. We
   //    can handle triangular, quadrilateral, tetrahedral, hexahedral, surface
   //    and volume meshes with the same code.
   Mesh *mesh;
   ifstream imesh(mesh_file);
   if (!imesh)
   {
      if (myid == 0)
         cerr << "\nCannot open mesh file: " << mesh_file << '\n' << endl;
      MPI_Finalize();
      return 2;
   }
   mesh = new Mesh(imesh, 1, 1);
   imesh.close();
   int dim = mesh->Dimension();
   if (dim != 3)
   {
      if (myid == 0)
         cerr << "\nThis example requires a 3D mesh\n" << endl;
      MPI_Finalize();
      return 3;
   }

   // 4. Refine the serial mesh on all processors to increase the resolution. In
   //    this example we do 'ref_levels' of uniform refinement. We choose
   //    'ref_levels' to be the largest number that gives a final mesh with no
   //    more than 1,000 elements.
   {
      int ref_levels =
         (int)floor(log(1000./mesh->GetNE())/log(2.)/dim);
      for (int l = 0; l < ref_levels; l++)
         mesh->UniformRefinement();
   }

   // 5. Define a parallel mesh by a partitioning of the serial mesh. Refine
   //    this mesh further in parallel to increase the resolution. Once the
   //    parallel mesh is defined, the serial mesh can be deleted. Tetrahedral
   //    meshes need to be reoriented before we can define high-order Nedelec
   //    spaces on them.
   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 2;
      for (int l = 0; l < par_ref_levels; l++)
         pmesh->UniformRefinement();
   }
   pmesh->ReorientTetMesh();

   // 6. Define a parallel finite element space on the parallel mesh. Here we
   //    use the lowest order Nedelec finite elements, but we can easily switch
   //    to higher-order spaces by changing the value of 'order'.
   FiniteElementCollection *fec = new ND_FECollection(order, dim);
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, fec);
   int size = fespace->GlobalTrueVSize();
   if (myid == 0)
      cout << "Number of unknowns: " << size << endl;

   // 7. Set up the parallel linear form b(.) which corresponds to the
   //    right-hand side of the FEM linear system, which in this case is
   //    (f,phi_i) where f is given by the function f_exact and phi_i are the
   //    basis functions in the finite element fespace.
   VectorFunctionCoefficient f(3, f_exact);
   ParLinearForm *b = new ParLinearForm(fespace);
   b->AddDomainIntegrator(new VectorFEDomainLFIntegrator(f));
   b->Assemble();

   // 8. Define the solution vector x as a parallel finite element grid function
   //    corresponding to fespace. Initialize x by projecting the exact
   //    solution. Note that only values from the boundary edges will be used
   //    when eliminating the non-homogeneous boundary condition to modify the
   //    r.h.s. vector b.
   ParGridFunction x(fespace);
   VectorFunctionCoefficient E(3, E_exact);
   x.ProjectCoefficient(E);
   // 9. Set up the parallel bilinear form corresponding to the EM diffusion
   //    operator curl muinv curl + sigma I, by adding the curl-curl and the
   //    mass domain integrators and finally imposing non-homogeneous Dirichlet
   //    boundary conditions. The boundary conditions are implemented by
   //    marking all the boundary attributes from the mesh as essential
   //    (Dirichlet). After serial and parallel assembly we extract the
   //    parallel matrix A.
   Coefficient *muinv = new ConstantCoefficient(1.0);
   Coefficient *sigma = new ConstantCoefficient(1.0);
   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new CurlCurlIntegrator(*muinv));
   a->AddDomainIntegrator(new VectorFEMassIntegrator(*sigma));
   a->Assemble();
   Array<int> ess_bdr(pmesh->bdr_attributes.Max());
   ess_bdr = 1;
   a->EliminateEssentialBC(ess_bdr, x, *b);
   a->Finalize();

   // 10. Define the parallel (hypre) matrix and vectors representing a(.,.),
   //     b(.) and the finite element approximation.
   HypreParMatrix *A = a->ParallelAssemble();
   HypreParVector *B = b->ParallelAssemble();
   HypreParVector *X = x.ParallelAverage();
   *X = 0.0;

   delete a;
   delete sigma;
   delete muinv;
   delete b;

   // 11. Define and apply a parallel PCG solver for AX=B with the AMS
   //     preconditioner from hypre.
   HypreSolver *ams = new HypreAMS(*A, fespace);
   HyprePCG *pcg = new HyprePCG(*A);
   pcg->SetTol(1e-12);
   pcg->SetMaxIter(500);
   pcg->SetPrintLevel(2);
   pcg->SetPreconditioner(*ams);
   pcg->Mult(*B, *X);

   // 12. Extract the parallel grid function corresponding to the finite element
   //     approximation X. This is the local solution on each processor.
   x = *X;

   // 13. Compute and print the L^2 norm of the error.
   {
      double err = x.ComputeL2Error(E);
      if (myid == 0)
         cout << "\n|| E_h - E ||_{L^2} = " << err << '\n' << endl;
   }

   // 14. Save the refined mesh and the solution in parallel. This output can
   //     be viewed later using GLVis: "glvis -np <np> -m mesh -g sol".
   {
      ostringstream mesh_name, sol_name;
      mesh_name << "mesh." << setfill('0') << setw(6) << myid;
      sol_name << "sol." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream sol_ofs(sol_name.str().c_str());
      sol_ofs.precision(8);
      x.Save(sol_ofs);
   }

   // 15. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int  visport   = 19916;
      socketstream sol_sock(vishost, visport);
      sol_sock << "parallel " << num_procs << " " << myid << "\n";
      sol_sock.precision(8);
      sol_sock << "solution\n" << *pmesh << x << flush;
   }

   // 16. Free the used memory.
   delete pcg;
   delete ams;
   delete X;
   delete B;
   delete A;
   delete fespace;
   delete fec;
   delete pmesh;

   MPI_Finalize();

   return 0;
}
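The listing above references two user-defined functions, E_exact and f_exact, that are not shown. The sketch below gives one choice consistent with the curl-curl plus mass operator of step 9 (for E = (sin(kappa*y), sin(kappa*z), sin(kappa*x)), curl curl E = kappa^2 E, so f = (1 + kappa^2) E); treat the exact bodies and the value of kappa as assumptions rather than the example's verbatim source. In the real file these would be declared before main().

// Hedged sketch of the exact solution and matching source term referenced
// above (assumed definitions, with kappa = pi).
const double kappa = M_PI;

void E_exact(const Vector &x, Vector &E)
{
   E(0) = sin(kappa * x(1));
   E(1) = sin(kappa * x(2));
   E(2) = sin(kappa * x(0));
}

void f_exact(const Vector &x, Vector &f)
{
   // f = curl curl E + E = (1 + kappa^2) E for the E above.
   f(0) = (1. + kappa * kappa) * sin(kappa * x(1));
   f(1) = (1. + kappa * kappa) * sin(kappa * x(2));
   f(2) = (1. + kappa * kappa) * sin(kappa * x(0));
}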
int main(int argc, char *argv[])
{
   // 1. Initialize MPI.
   int num_procs, myid;
   MPI_Init(&argc, &argv);
   MPI_Comm_size(MPI_COMM_WORLD, &num_procs);
   MPI_Comm_rank(MPI_COMM_WORLD, &myid);

   // 2. Parse command-line options.
   int elem_type = 1;
   int ref_levels = 2;
   int amr = 0;
   int order = 2;
   bool always_snap = false;
   bool visualization = 1;

   OptionsParser args(argc, argv);
   args.AddOption(&elem_type, "-e", "--elem",
                  "Type of elements to use: 0 - triangles, 1 - quads.");
   args.AddOption(&order, "-o", "--order",
                  "Finite element order (polynomial degree).");
   args.AddOption(&ref_levels, "-r", "--refine",
                  "Number of times to refine the mesh uniformly.");
   args.AddOption(&amr, "-amr", "--refine-locally",
                  "Additional local (non-conforming) refinement:"
                  " 1 = refine around north pole, 2 = refine randomly.");
   args.AddOption(&visualization, "-vis", "--visualization", "-no-vis",
                  "--no-visualization",
                  "Enable or disable GLVis visualization.");
   args.AddOption(&always_snap, "-snap", "--always-snap", "-no-snap",
                  "--snap-at-the-end",
                  "If true, snap nodes to the sphere initially and after each"
                  " refinement; otherwise, snap only after the last refinement.");
   args.Parse();
   if (!args.Good())
   {
      if (myid == 0)
      {
         args.PrintUsage(cout);
      }
      MPI_Finalize();
      return 1;
   }
   if (myid == 0)
   {
      args.PrintOptions(cout);
   }

   // 3. Generate an initial high-order (surface) mesh on the unit sphere. The
   //    Mesh object represents a 2D mesh in 3 spatial dimensions. We first add
   //    the elements and the vertices of the mesh, and then make it high-order
   //    by specifying a finite element space for its nodes.
   int Nvert = 8, Nelem = 6;
   if (elem_type == 0)
   {
      Nvert = 6;
      Nelem = 8;
   }
   Mesh *mesh = new Mesh(2, Nvert, Nelem, 0, 3);

   if (elem_type == 0) // inscribed octahedron
   {
      const double tri_v[6][3] =
      {
         { 1,  0,  0}, { 0,  1,  0}, {-1,  0,  0},
         { 0, -1,  0}, { 0,  0,  1}, { 0,  0, -1}
      };
      const int tri_e[8][3] =
      {
         {0, 1, 4}, {1, 2, 4}, {2, 3, 4}, {3, 0, 4},
         {1, 0, 5}, {2, 1, 5}, {3, 2, 5}, {0, 3, 5}
      };

      for (int j = 0; j < Nvert; j++)
      {
         mesh->AddVertex(tri_v[j]);
      }
      for (int j = 0; j < Nelem; j++)
      {
         int attribute = j + 1;
         mesh->AddTriangle(tri_e[j], attribute);
      }
      mesh->FinalizeTriMesh(1, 1, true);
   }
   else // inscribed cube
   {
      const double quad_v[8][3] =
      {
         {-1, -1, -1}, {+1, -1, -1}, {+1, +1, -1}, {-1, +1, -1},
         {-1, -1, +1}, {+1, -1, +1}, {+1, +1, +1}, {-1, +1, +1}
      };
      const int quad_e[6][4] =
      {
         {3, 2, 1, 0}, {0, 1, 5, 4}, {1, 2, 6, 5},
         {2, 3, 7, 6}, {3, 0, 4, 7}, {4, 5, 6, 7}
      };

      for (int j = 0; j < Nvert; j++)
      {
         mesh->AddVertex(quad_v[j]);
      }
      for (int j = 0; j < Nelem; j++)
      {
         int attribute = j + 1;
         mesh->AddQuad(quad_e[j], attribute);
      }
      mesh->FinalizeQuadMesh(1, 1, true);
   }

   // Set the space for the high-order mesh nodes.
   H1_FECollection fec(order, mesh->Dimension());
   FiniteElementSpace nodal_fes(mesh, &fec, mesh->SpaceDimension());
   mesh->SetNodalFESpace(&nodal_fes);

   // 4. Refine the mesh while snapping nodes to the sphere. Number of parallel
   //    refinements is fixed to 2.
   for (int l = 0; l <= ref_levels; l++)
   {
      if (l > 0) // for l == 0 just perform snapping
      {
         mesh->UniformRefinement();
      }

      // Snap the nodes of the refined mesh back to the sphere surface.
      if (always_snap)
      {
         SnapNodes(*mesh);
      }
   }

   if (amr == 1)
   {
      for (int l = 0; l < 3; l++)
      {
         mesh->RefineAtVertex(Vertex(0, 0, 1));
      }
      SnapNodes(*mesh);
   }
   else if (amr == 2)
   {
      for (int l = 0; l < 2; l++)
      {
         mesh->RandomRefinement(0.5); // 50% probability
      }
      SnapNodes(*mesh);
   }

   ParMesh *pmesh = new ParMesh(MPI_COMM_WORLD, *mesh);
   delete mesh;
   {
      int par_ref_levels = 2;
      for (int l = 0; l < par_ref_levels; l++)
      {
         pmesh->UniformRefinement();

         // Snap the nodes of the refined mesh back to the sphere surface.
         if (always_snap)
         {
            SnapNodes(*pmesh);
         }
      }

      if (!always_snap || par_ref_levels < 1)
      {
         SnapNodes(*pmesh);
      }
   }

   if (amr == 1)
   {
      for (int l = 0; l < 2; l++)
      {
         pmesh->RefineAtVertex(Vertex(0, 0, 1));
      }
      SnapNodes(*pmesh);
   }
   else if (amr == 2)
   {
      for (int l = 0; l < 2; l++)
      {
         pmesh->RandomRefinement(0.5); // 50% probability
      }
      SnapNodes(*pmesh);
   }

   // 5. Define a parallel finite element space on the parallel mesh. Here we
   //    use isoparametric finite elements -- the same as the mesh nodes.
   ParFiniteElementSpace *fespace = new ParFiniteElementSpace(pmesh, &fec);
   HYPRE_Int size = fespace->GlobalTrueVSize();
   if (myid == 0)
   {
      cout << "Number of unknowns: " << size << endl;
   }

   // 6. Set up the linear form b(.) which corresponds to the right-hand side of
   //    the FEM linear system, which in this case is (f,phi_i) where f is given
   //    by the function analytic_rhs and phi_i are the basis functions in the
   //    finite element fespace.
   ParLinearForm *b = new ParLinearForm(fespace);
   ConstantCoefficient one(1.0);
   FunctionCoefficient rhs_coef(analytic_rhs);
   FunctionCoefficient sol_coef(analytic_solution);
   b->AddDomainIntegrator(new DomainLFIntegrator(rhs_coef));
   b->Assemble();

   // 7. Define the solution vector x as a finite element grid function
   //    corresponding to fespace. Initialize x with initial guess of zero.
   ParGridFunction x(fespace);
   x = 0.0;

   // 8. Set up the bilinear form a(.,.) on the finite element space
   //    corresponding to the operator -Delta + I, by adding the Diffusion
   //    and Mass domain integrators.
   ParBilinearForm *a = new ParBilinearForm(fespace);
   a->AddDomainIntegrator(new DiffusionIntegrator(one));
   a->AddDomainIntegrator(new MassIntegrator(one));

   // 9. Assemble the parallel linear system, applying any transformations
   //    such as: parallel assembly, applying conforming constraints, etc.
   a->Assemble();
   HypreParMatrix A;
   Vector B, X;
   Array<int> empty_tdof_list;
   a->FormLinearSystem(empty_tdof_list, x, *b, A, X, B);

   // 10. Define and apply a parallel PCG solver for AX=B with the BoomerAMG
   //     preconditioner from hypre. Extract the parallel grid function x
   //     corresponding to the finite element approximation X. This is the
   //     local solution on each processor.
   HypreSolver *amg = new HypreBoomerAMG(A);
   HyprePCG *pcg = new HyprePCG(A);
   pcg->SetTol(1e-12);
   pcg->SetMaxIter(200);
   pcg->SetPrintLevel(2);
   pcg->SetPreconditioner(*amg);
   pcg->Mult(B, X);
   a->RecoverFEMSolution(X, *b, x);

   delete a;
   delete b;

   // 11. Compute and print the L^2 norm of the error.
   double err = x.ComputeL2Error(sol_coef);
   if (myid == 0)
   {
      cout << "\nL2 norm of error: " << err << endl;
   }

   // 12. Save the refined mesh and the solution. This output can be viewed
   //     later using GLVis: "glvis -np <np> -m sphere_refined -g sol".
   {
      ostringstream mesh_name, sol_name;
      mesh_name << "sphere_refined." << setfill('0') << setw(6) << myid;
      sol_name << "sol." << setfill('0') << setw(6) << myid;

      ofstream mesh_ofs(mesh_name.str().c_str());
      mesh_ofs.precision(8);
      pmesh->Print(mesh_ofs);

      ofstream sol_ofs(sol_name.str().c_str());
      sol_ofs.precision(8);
      x.Save(sol_ofs);
   }

   // 13. Send the solution by socket to a GLVis server.
   if (visualization)
   {
      char vishost[] = "localhost";
      int  visport   = 19916;
      socketstream sol_sock(vishost, visport);
      sol_sock << "parallel " << num_procs << " " << myid << "\n";
      sol_sock.precision(8);
      sol_sock << "solution\n" << *pmesh << x << flush;
   }

   // 14. Free the used memory.
   delete pcg;
   delete amg;
   delete fespace;
   delete pmesh;

   MPI_Finalize();

   return 0;
}
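The sphere example above calls three helpers that are not shown: SnapNodes, analytic_solution, and analytic_rhs. The sketch below gives plausible definitions consistent with how they are used: SnapNodes normalizes the high-order mesh nodes onto the unit sphere, and the manufactured solution u = x*y satisfies (-Delta + I)u = 7u on the sphere since the degree-2 spherical harmonic has Laplace-Beltrami eigenvalue l(l+1) = 6. Treat the exact bodies as assumptions rather than the example's verbatim source.

// Hedged sketch of the helper functions referenced above.

// Snap every mesh node to the unit sphere by normalizing its coordinates.
void SnapNodes(Mesh &mesh)
{
   GridFunction &nodes = *mesh.GetNodes();
   Vector node(mesh.SpaceDimension());
   for (int i = 0; i < nodes.FESpace()->GetNDofs(); i++)
   {
      for (int d = 0; d < mesh.SpaceDimension(); d++)
      {
         node(d) = nodes(nodes.FESpace()->DofToVDof(i, d));
      }
      node /= node.Norml2();
      for (int d = 0; d < mesh.SpaceDimension(); d++)
      {
         nodes(nodes.FESpace()->DofToVDof(i, d)) = node(d);
      }
   }
}

// Manufactured solution u = x*y on the unit sphere; dividing by |x|^2 keeps
// the expression exact even when nodes sit slightly off the sphere.
double analytic_solution(const Vector &x)
{
   double l2 = x(0)*x(0) + x(1)*x(1) + x(2)*x(2);
   return x(0)*x(1)/l2;
}

// Right-hand side: (-Delta + I) u = (6 + 1) u = 7*x*y on the unit sphere.
double analytic_rhs(const Vector &x)
{
   double l2 = x(0)*x(0) + x(1)*x(1) + x(2)*x(2);
   return 7*x(0)*x(1)/l2;
}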
void TeslaSolver::Solve()
{
   if (myid_ == 0) { cout << "Running solver ... " << endl << flush; }

   // Initialize the magnetic vector potential with its boundary conditions
   *a_ = 0.0;

   // Apply surface currents if available
   if ( k_ )
   {
      SurfCur_->ComputeSurfaceCurrent(*k_);
      *a_ = *k_;
   }

   // Apply uniform B boundary condition on remaining surfaces
   a_->ProjectBdrCoefficientTangent(*aBCCoef_, non_k_bdr_);

   // Initialize the RHS vector
   HypreParVector *RHS = new HypreParVector(HCurlFESpace_);
   *RHS = 0.0;

   HypreParMatrix *MassHCurl = hCurlMass_->ParallelAssemble();

   // Initialize the volumetric current density
   if ( j_ )
   {
      j_->ProjectCoefficient(*jCoef_);

      HypreParVector *J  = j_->ParallelProject();
      HypreParVector *JD = new HypreParVector(HCurlFESpace_);

      MassHCurl->Mult(*J,*JD);
      DivFreeProj_->Mult(*JD, *RHS);

      delete J;
      delete JD;
   }

   // Initialize the Magnetization
   HypreParVector *M = NULL;
   if ( m_ )
   {
      m_->ProjectCoefficient(*mCoef_);
      M = m_->ParallelProject();

      HypreParMatrix *MassHDiv = hDivMassMuInv_->ParallelAssemble();
      HypreParVector *MD       = new HypreParVector(HDivFESpace_);

      MassHDiv->Mult(*M,*MD);
      Curl_->MultTranspose(*MD,*RHS,mu0_,1.0);

      delete MassHDiv;
      delete MD;
   }

   // Apply Dirichlet BCs to matrix and right hand side
   HypreParMatrix *CurlMuInvCurl = curlMuInvCurl_->ParallelAssemble();
   HypreParVector *A             = a_->ParallelProject();

   // Apply the boundary conditions to the assembled matrix and vectors
   curlMuInvCurl_->ParallelEliminateEssentialBC(ess_bdr_,
                                                *CurlMuInvCurl,
                                                *A, *RHS);

   // Define and apply a parallel PCG solver for AX=B with the AMS
   // preconditioner from hypre.
   HypreAMS *ams = new HypreAMS(*CurlMuInvCurl, HCurlFESpace_);
   ams->SetSingularProblem();

   HyprePCG *pcg = new HyprePCG(*CurlMuInvCurl);
   pcg->SetTol(1e-12);
   pcg->SetMaxIter(500);
   pcg->SetPrintLevel(2);
   pcg->SetPreconditioner(*ams);
   pcg->Mult(*RHS, *A);

   delete ams;
   delete pcg;
   delete CurlMuInvCurl;
   delete RHS;

   // Extract the parallel grid function corresponding to the finite
   // element approximation A. This is the local solution on each
   // processor.
   *a_ = *A;

   // Compute the Curl of the solution vector. This is the magnetic
   // flux density corresponding to the vector potential represented
   // by a.
   HypreParVector *B = new HypreParVector(HDivFESpace_);
   Curl_->Mult(*A,*B);
   *b_ = *B;

   // Compute magnetic field (H) from B and M
   if (myid_ == 0) { cout << "Computing H ... " << flush; }

   HypreParMatrix *HDivHCurlMuInv = hDivHCurlMuInv_->ParallelAssemble();
   HypreParVector *BD = new HypreParVector(HCurlFESpace_);
   HypreParVector *H  = new HypreParVector(HCurlFESpace_);

   HDivHCurlMuInv->Mult(*B,*BD);

   if ( M )
   {
      HDivHCurlMuInv->Mult(*M,*BD,-1.0*mu0_,1.0);
   }

   HyprePCG *pcgM = new HyprePCG(*MassHCurl);
   pcgM->SetTol(1e-12);
   pcgM->SetMaxIter(500);
   pcgM->SetPrintLevel(0);
   HypreDiagScale *diagM = new HypreDiagScale;
   pcgM->SetPreconditioner(*diagM);
   pcgM->Mult(*BD,*H);

   *h_ = *H;

   if (myid_ == 0) { cout << "done." << flush; }

   delete diagM;
   delete pcgM;
   delete HDivHCurlMuInv;
   delete MassHCurl;
   delete A;
   delete B;
   delete BD;
   delete H;
   delete M;

   if (myid_ == 0) { cout << " Solver done. " << flush; }
}
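Curl_ above is applied as a discrete curl operator mapping H(curl) true dofs into H(div) (for B = curl A) and, transposed, to bring the magnetization term into the H(curl) right-hand side. The miniapp constructs this operator elsewhere; the snippet below is only a minimal sketch of one way to build such an operator in MFEM, assuming ParFiniteElementSpace objects named HCurlFESpace and HDivFESpace already exist on the same ParMesh.

// Hedged sketch: building a discrete curl operator (HCurl -> HDiv).
// HCurlFESpace and HDivFESpace are assumed, pre-existing spaces.
ParDiscreteLinearOperator curl(&HCurlFESpace, &HDivFESpace);
curl.AddDomainInterpolator(new CurlInterpolator);
curl.Assemble();
curl.Finalize();
HypreParMatrix *Curl = curl.ParallelAssemble(); // plays the role of Curl_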
void VoltaSolver::Solve()
{
   if (myid_ == 0) { cout << "Running solver ... " << endl << flush; }

   // Initialize the electric potential with its boundary conditions
   *phi_ = 0.0;

   if ( dbcs_->Size() > 0 )
   {
      if ( phiBCCoef_ )
      {
         // Apply the user-specified potential boundary condition
         phi_->ProjectBdrCoefficient(*phiBCCoef_, ess_bdr_);
      }
      else
      {
         // Apply piecewise constant boundary condition
         Array<int> dbc_bdr_attr(pmesh_->bdr_attributes.Max());
         for (int i=0; i<dbcs_->Size(); i++)
         {
            ConstantCoefficient voltage((*dbcv_)[i]);
            dbc_bdr_attr = 0;
            dbc_bdr_attr[(*dbcs_)[i]-1] = 1;
            phi_->ProjectBdrCoefficient(voltage, dbc_bdr_attr);
         }
      }
   }

   // Initialize the RHS vector
   HypreParVector *RHS = new HypreParVector(H1FESpace_);
   *RHS = 0.0;

   // Initialize the volumetric charge density
   if ( rho_ )
   {
      rho_->ProjectCoefficient(*rhoCoef_);

      HypreParMatrix *MassH1 = h1Mass_->ParallelAssemble();
      HypreParVector *Rho    = rho_->ParallelProject();

      MassH1->Mult(*Rho,*RHS);

      delete MassH1;
      delete Rho;
   }

   // Initialize the Polarization
   HypreParVector *P = NULL;
   if ( p_ )
   {
      p_->ProjectCoefficient(*pCoef_);
      P = p_->ParallelProject();

      HypreParMatrix *MassHCurl = hCurlMass_->ParallelAssemble();
      HypreParVector *PD        = new HypreParVector(HCurlFESpace_);

      MassHCurl->Mult(*P,*PD);
      Grad_->MultTranspose(*PD,*RHS,-1.0,1.0);

      delete MassHCurl;
      delete PD;
   }

   // Initialize the surface charge density
   if ( sigma_ )
   {
      *sigma_ = 0.0;

      Array<int> nbc_bdr_attr(pmesh_->bdr_attributes.Max());
      for (int i=0; i<nbcs_->Size(); i++)
      {
         ConstantCoefficient sigma_coef((*nbcv_)[i]);
         nbc_bdr_attr = 0;
         nbc_bdr_attr[(*nbcs_)[i]-1] = 1;
         sigma_->ProjectBdrCoefficient(sigma_coef, nbc_bdr_attr);
      }

      HypreParMatrix *MassS = h1SurfMass_->ParallelAssemble();
      HypreParVector *Sigma = sigma_->ParallelProject();

      MassS->Mult(*Sigma,*RHS,1.0,1.0);

      delete MassS;
      delete Sigma;
   }

   // Apply Dirichlet BCs to matrix and right hand side
   HypreParMatrix *DivEpsGrad = divEpsGrad_->ParallelAssemble();
   HypreParVector *Phi        = phi_->ParallelProject();

   // Apply the boundary conditions to the assembled matrix and vectors
   if ( dbcs_->Size() > 0 )
   {
      // According to the selected surfaces
      divEpsGrad_->ParallelEliminateEssentialBC(ess_bdr_,
                                                *DivEpsGrad,
                                                *Phi, *RHS);
   }
   else
   {
      // No surfaces were labeled as Dirichlet so eliminate one DoF
      Array<int> dof_list(0);
      if ( myid_ == 0 )
      {
         dof_list.SetSize(1);
         dof_list[0] = 0;
      }
      DivEpsGrad->EliminateRowsCols(dof_list, *Phi, *RHS);
   }

   // Define and apply a parallel PCG solver for AX=B with the AMG
   // preconditioner from hypre.
   HypreSolver *amg = new HypreBoomerAMG(*DivEpsGrad);
   HyprePCG *pcg = new HyprePCG(*DivEpsGrad);
   pcg->SetTol(1e-12);
   pcg->SetMaxIter(500);
   pcg->SetPrintLevel(2);
   pcg->SetPreconditioner(*amg);
   pcg->Mult(*RHS, *Phi);

   delete amg;
   delete pcg;
   delete DivEpsGrad;
   delete RHS;

   // Extract the parallel grid function corresponding to the finite
   // element approximation Phi. This is the local solution on each
   // processor.
   *phi_ = *Phi;

   // Compute the negative Gradient of the solution vector. This is
   // the electric field corresponding to the scalar potential
   // represented by phi.
   HypreParVector *E = new HypreParVector(HCurlFESpace_);
   Grad_->Mult(*Phi,*E,-1.0);
   *e_ = *E;

   delete Phi;

   // Compute electric displacement (D) from E and P
" << flush; } HypreParMatrix *HCurlHDivEps = hCurlHDivEps_->ParallelAssemble(); HypreParVector *ED = new HypreParVector(HDivFESpace_); HypreParVector *D = new HypreParVector(HDivFESpace_); HCurlHDivEps->Mult(*E,*ED); if ( P ) { HypreParMatrix *HCurlHDiv = hCurlHDiv_->ParallelAssemble(); HCurlHDiv->Mult(*P,*ED,-1.0,1.0); delete HCurlHDiv; } HypreParMatrix * MassHDiv = hDivMass_->ParallelAssemble(); HyprePCG * pcgM = new HyprePCG(*MassHDiv); pcgM->SetTol(1e-12); pcgM->SetMaxIter(500); pcgM->SetPrintLevel(0); HypreDiagScale *diagM = new HypreDiagScale; pcgM->SetPreconditioner(*diagM); pcgM->Mult(*ED,*D); *d_ = *D; if (myid_ == 0) { cout << "done." << flush; } delete diagM; delete pcgM; delete HCurlHDivEps; delete MassHDiv; delete E; delete ED; delete D; delete P; if (myid_ == 0) { cout << " Solver done. " << flush; } }
double L2ZZErrorEstimator(BilinearFormIntegrator &flux_integrator,
                          const ParGridFunction &x,
                          ParFiniteElementSpace &smooth_flux_fes,
                          ParFiniteElementSpace &flux_fes,
                          Vector &errors,
                          int norm_p, double solver_tol, int solver_max_it)
{
   // Compute fluxes in discontinuous space
   GridFunction flux(&flux_fes);
   flux = 0.0;

   ParFiniteElementSpace *xfes = x.ParFESpace();
   Array<int> xdofs, fdofs;
   Vector el_x, el_f;

   for (int i = 0; i < xfes->GetNE(); i++)
   {
      xfes->GetElementVDofs(i, xdofs);
      x.GetSubVector(xdofs, el_x);

      ElementTransformation *Transf = xfes->GetElementTransformation(i);
      flux_integrator.ComputeElementFlux(*xfes->GetFE(i), *Transf, el_x,
                                         *flux_fes.GetFE(i), el_f, false);

      flux_fes.GetElementVDofs(i, fdofs);
      flux.AddElementVector(fdofs, el_f);
   }

   // Assemble the linear system for L2 projection into the "smooth" space
   ParBilinearForm *a = new ParBilinearForm(&smooth_flux_fes);
   ParLinearForm *b = new ParLinearForm(&smooth_flux_fes);
   VectorGridFunctionCoefficient f(&flux);

   if (xfes->GetNE())
   {
      if (smooth_flux_fes.GetFE(0)->GetRangeType() == FiniteElement::SCALAR)
      {
         VectorMassIntegrator *vmass = new VectorMassIntegrator;
         vmass->SetVDim(smooth_flux_fes.GetVDim());
         a->AddDomainIntegrator(vmass);
         b->AddDomainIntegrator(new VectorDomainLFIntegrator(f));
      }
      else
      {
         a->AddDomainIntegrator(new VectorFEMassIntegrator);
         b->AddDomainIntegrator(new VectorFEDomainLFIntegrator(f));
      }
   }

   b->Assemble();
   a->Assemble();
   a->Finalize();

   // The destination of the projected discontinuous flux
   ParGridFunction smooth_flux(&smooth_flux_fes);
   smooth_flux = 0.0;

   HypreParMatrix *A = a->ParallelAssemble();
   HypreParVector *B = b->ParallelAssemble();
   HypreParVector *X = smooth_flux.ParallelProject();

   delete a;
   delete b;

   // Define and apply a parallel PCG solver for AX=B with the BoomerAMG
   // preconditioner from hypre.
   HypreBoomerAMG *amg = new HypreBoomerAMG(*A);
   amg->SetPrintLevel(0);
   HyprePCG *pcg = new HyprePCG(*A);
   pcg->SetTol(solver_tol);
   pcg->SetMaxIter(solver_max_it);
   pcg->SetPrintLevel(0);
   pcg->SetPreconditioner(*amg);
   pcg->Mult(*B, *X);

   // Extract the parallel grid function corresponding to the finite element
   // approximation X. This is the local solution on each processor.
   smooth_flux = *X;

   delete A;
   delete B;
   delete X;
   delete amg;
   delete pcg;

   // Proceed through the elements one by one, and find the Lp norm differences
   // between the flux as computed per element and the flux projected onto the
   // smooth_flux_fes space.
   double total_error = 0.0;
   errors.SetSize(xfes->GetNE());
   for (int i = 0; i < xfes->GetNE(); i++)
   {
      errors(i) = ComputeElementLpDistance(norm_p, i, smooth_flux, flux);
      total_error += pow(errors(i), norm_p);
   }

   double glob_error;
   MPI_Allreduce(&total_error, &glob_error, 1, MPI_DOUBLE, MPI_SUM,
                 xfes->GetComm());

   return pow(glob_error, 1.0/norm_p);
}
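A short usage sketch of the estimator above, driving it with a diffusion flux. The names pmesh, x, one, and order, as well as the choice of a broken vector L2 space for the raw flux and a continuous vector H1 space for the smoothed flux, are assumptions for illustration; the call itself matches the signature defined above.

// Hedged usage sketch: per-element ZZ error estimates for a ParGridFunction
// 'x' solving a diffusion problem on ParMesh 'pmesh', with coefficient 'one'
// and polynomial order 'order' (all assumed to exist).
int dim  = pmesh->Dimension();
int sdim = pmesh->SpaceDimension();

DiffusionIntegrator flux_integrator(one);

// Broken (element-wise discontinuous) space holding the raw flux.
L2_FECollection flux_fec(order, dim);
ParFiniteElementSpace flux_fes(pmesh, &flux_fec, sdim);

// Continuous space into which the flux is L2-projected ("smoothed").
H1_FECollection smooth_flux_fec(order, dim);
ParFiniteElementSpace smooth_flux_fes(pmesh, &smooth_flux_fec, sdim);

Vector errors;
double total_err = L2ZZErrorEstimator(flux_integrator, x,
                                      smooth_flux_fes, flux_fes, errors,
                                      2,      // norm_p
                                      1e-12,  // solver_tol
                                      200);   // solver_max_it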