int main (int argc, char** argv)
{
#ifdef HAVE_HDF5
#ifdef HAVE_MPI
    MPI_Init (&argc, &argv);
    boost::shared_ptr<Epetra_Comm> comm (new Epetra_MpiComm (MPI_COMM_WORLD) );
    const bool verbose (comm->MyPID() == 0);

    // Read the data first
    if (verbose) { std::cout << " -- Reading the data ... " << std::flush; }
    GetPot dataFile ("data");
    if (verbose) { std::cout << " done ! " << std::endl; }

    const UInt Nelements (dataFile ("mesh/nelements", 10) );
    if (verbose) { std::cout << " ---> Number of elements : " << Nelements << std::endl; }

    // Load the mesh part from the HDF5 container, either through the old
    // interface (ExporterHDF5Mesh3D) or the new one (PartitionIO)
    const std::string partsFileName (dataFile ("test/hdf5_file_name", "cube.h5") );
    const std::string ioClass (dataFile ("test/io_class", "new") );

    boost::shared_ptr<mesh_Type> mesh;
    if (! ioClass.compare ("old") )
    {
        ExporterHDF5Mesh3D<mesh_Type> HDF5Input (dataFile, partsFileName);
        HDF5Input.setComm (comm);
        mesh = HDF5Input.getMeshPartition();
        HDF5Input.closeFile();
    }
    else
    {
        PartitionIO<RegionMesh<LinearTetra> > partitionIO (partsFileName, comm);
        partitionIO.read (mesh);
    }

    // Build the FESpaces
    if (verbose) { std::cout << " -- Building FESpaces ... " << std::flush; }
    std::string uOrder ("P1");
    std::string bOrder ("P1");
    boost::shared_ptr<FESpace<mesh_Type, MapEpetra> > uFESpace
        (new FESpace<mesh_Type, MapEpetra> (mesh, uOrder, 1, comm) );
    boost::shared_ptr<FESpace<mesh_Type, MapEpetra> > betaFESpace
        (new FESpace<mesh_Type, MapEpetra> (mesh, bOrder, 3, comm) );
    if (verbose) { std::cout << " done ! " << std::endl; }
    if (verbose) { std::cout << " ---> Dofs: " << uFESpace->dof().numTotalDof() << std::endl; }

    // Build the assembler and the matrices
    if (verbose) { std::cout << " -- Building assembler ... " << std::flush; }
    ADRAssembler<mesh_Type, matrix_Type, vector_Type> adrAssembler;
    if (verbose) { std::cout << " done! " << std::endl; }

    if (verbose) { std::cout << " -- Setting up assembler ... " << std::flush; }
    adrAssembler.setup (uFESpace, betaFESpace);
    if (verbose) { std::cout << " done! " << std::endl; }

    if (verbose) { std::cout << " -- Defining the matrix ... " << std::flush; }
    boost::shared_ptr<matrix_Type> systemMatrix (new matrix_Type (uFESpace->map() ) );
    *systemMatrix *= 0.0;
    if (verbose) { std::cout << " done! " << std::endl; }

    // Perform the assembly of the matrix
    if (verbose) { std::cout << " -- Adding the diffusion ... " << std::flush; }
    adrAssembler.addDiffusion (systemMatrix, 1);
    if (verbose) { std::cout << " done! " << std::endl; }
    if (verbose)
    {
        std::cout << " Time needed : "
                  << adrAssembler.diffusionAssemblyChrono().diffCumul() << std::endl;
    }

    if (verbose) { std::cout << " -- Closing the matrix ... " << std::flush; }
    systemMatrix->globalAssemble();
    if (verbose) { std::cout << " done ! " << std::endl; }

    // Regression check on the matrix norm
    Real matrixNorm (systemMatrix->norm1() );
    if (verbose) { std::cout << " ---> Norm 1 : " << matrixNorm << std::endl; }
    if (std::fabs (matrixNorm - 1.68421) > 1e-3)
    {
        std::cout << " <!> Matrix has changed !!! <!> " << std::endl;
        return EXIT_FAILURE;
    }

    // Definition and assembly of the RHS
    if (verbose) { std::cout << " -- Building the RHS ... " << std::flush; }
    vector_Type rhs (uFESpace->map(), Repeated);
    rhs *= 0.0;

    vector_Type fInterpolated (uFESpace->map(), Repeated);
    fInterpolated *= 0.0;
    uFESpace->interpolate (static_cast<function_Type> (fRhs), fInterpolated, 0.0);
    adrAssembler.addMassRhs (rhs, fInterpolated);
    rhs.globalAssemble();
    if (verbose) { std::cout << " done ! " << std::endl; }

    // Definition and application of the BCs
    if (verbose) { std::cout << " -- Building the BCHandler ... " << std::flush; }
    BCHandler bchandler;
    BCFunctionBase BCu (exactSolution);
    bchandler.addBC ("Dirichlet", 1, Essential, Full, BCu, 1);
    for (UInt i (2); i <= 6; ++i)
    {
        bchandler.addBC ("Dirichlet", i, Essential, Full, BCu, 1);
    }
    if (verbose) { std::cout << " done ! " << std::endl; }

    if (verbose) { std::cout << " -- Updating the BCs ... " << std::flush; }
    bchandler.bcUpdate (*uFESpace->mesh(), uFESpace->feBd(), uFESpace->dof() );
    if (verbose) { std::cout << " done ! " << std::endl; }

    if (verbose) { std::cout << " -- Applying the BCs ... " << std::flush; }
    vector_Type rhsBC (rhs, Unique);
    bcManage (*systemMatrix, rhsBC, *uFESpace->mesh(), uFESpace->dof(),
              bchandler, uFESpace->feBd(), 1.0, 0.0);
    rhs = rhsBC;
    if (verbose) { std::cout << " done ! " << std::endl; }

    // Definition of the solver
    if (verbose) { std::cout << " -- Building the solver ... " << std::flush; }
    SolverAztecOO linearSolver;
    if (verbose) { std::cout << " done ! " << std::endl; }

    if (verbose) { std::cout << " -- Setting up the solver ... " << std::flush; }
    linearSolver.setDataFromGetPot (dataFile, "solver");
    linearSolver.setupPreconditioner (dataFile, "prec");
    if (verbose) { std::cout << " done ! " << std::endl; }

    if (verbose) { std::cout << " -- Setting matrix in the solver ... " << std::flush; }
    linearSolver.setMatrix (*systemMatrix);
    if (verbose) { std::cout << " done ! " << std::endl; }

    linearSolver.setCommunicator (comm);

    // Definition of the solution
    if (verbose) { std::cout << " -- Defining the solution ... " << std::flush; }
    vector_Type solution (uFESpace->map(), Unique);
    solution *= 0.0;
    if (verbose) { std::cout << " done ! " << std::endl; }

    // Solve the system
    if (verbose) { std::cout << " -- Solving the system ... " << std::flush; }
    linearSolver.solveSystem (rhsBC, solution, systemMatrix);
    if (verbose) { std::cout << " done ! " << std::endl; }

    // Error computation
    if (verbose) { std::cout << " -- Computing the error ... " << std::flush; }
    vector_Type solutionErr (solution);
    solutionErr *= 0.0;
    uFESpace->interpolate (static_cast<function_Type> (exactSolution), solutionErr, 0.0);
    solutionErr -= solution;
    solutionErr.abs();

    Real l2error (uFESpace->l2Error (exactSolution, vector_Type (solution, Repeated), 0.0) );
    if (verbose) { std::cout << " -- done ! " << std::endl; }
    if (verbose) { std::cout << " ---> Norm L2 : " << l2error << std::endl; }
    Real linferror (solutionErr.normInf() );
    if (verbose) { std::cout << " ---> Norm Inf : " << linferror << std::endl; }

    if (l2error > 0.0055)
    {
        std::cout << " <!> Solution has changed !!! <!> " << std::endl;
        return EXIT_FAILURE;
    }
    if (linferror > 0.0046)
    {
        std::cout << " <!> Solution has changed !!! <!> " << std::endl;
        return EXIT_FAILURE;
    }

    // Exporter definition and use
    if (verbose) { std::cout << " -- Defining the exporter ... " << std::flush; }
    ExporterHDF5<mesh_Type> exporter (dataFile, mesh, "solution", comm->MyPID() );
    if (verbose) { std::cout << " done ! " << std::endl; }

    if (verbose) { std::cout << " -- Defining the exported quantities ... " << std::flush; }
    boost::shared_ptr<vector_Type> solutionPtr (new vector_Type (solution, Repeated) );
    boost::shared_ptr<vector_Type> solutionErrPtr (new vector_Type (solutionErr, Repeated) );
    if (verbose) { std::cout << " done ! " << std::endl; }

    if (verbose) { std::cout << " -- Updating the exporter ... " << std::flush; }
    exporter.addVariable (ExporterData<mesh_Type>::ScalarField, "solution",
                          uFESpace, solutionPtr, UInt (0) );
    exporter.addVariable (ExporterData<mesh_Type>::ScalarField, "error",
                          uFESpace, solutionErrPtr, UInt (0) );
    if (verbose) { std::cout << " done ! " << std::endl; }

    if (verbose) { std::cout << " -- Exporting ... " << std::flush; }
    exporter.postProcess (0);
    exporter.closeFile();
    if (verbose) { std::cout << " done ! " << std::endl; }

    if (verbose) { std::cout << "End Result: TEST PASSED" << std::endl; }
    MPI_Finalize();
#else
    std::cout << "This test needs MPI to run. Aborting." << std::endl;
    return (EXIT_FAILURE);
#endif /* HAVE_MPI */
#else
    std::cout << "This test needs HDF5 to run. Aborting." << std::endl;
    return (EXIT_FAILURE);
#endif /* HAVE_HDF5 */

    return (EXIT_SUCCESS);
}
int main (int argc, char** argv)
{
#ifdef LIFEV_HAS_HDF5
#ifdef HAVE_MPI
    typedef RegionMesh<LinearTetra> mesh_Type;

    MPI_Init (&argc, &argv);
    boost::shared_ptr<Epetra_Comm> comm (new Epetra_MpiComm (MPI_COMM_WORLD) );

    if (comm->NumProc() != 1)
    {
        std::cout << "This test needs to be run "
                  << "with a single process. Aborting." << std::endl;
        return (EXIT_FAILURE);
    }

    // Read the test parameters from the GetPot data file
    GetPot commandLine (argc, argv);
    std::string dataFileName = commandLine.follow ("data", 2, "-f", "--file");
    GetPot dataFile (dataFileName);

    const UInt numElements (dataFile ("mesh/nelements", 10) );
    const UInt numParts (dataFile ("test/num_parts", 4) );
    const std::string partsFileName (dataFile ("test/hdf5_file_name", "cube.h5") );
    const std::string ioClass (dataFile ("test/io_class", "new") );

    std::cout << "Number of elements in mesh: " << numElements << std::endl;
    std::cout << "Number of parts: " << numParts << std::endl;
    std::cout << "Name of HDF5 container: " << partsFileName << std::endl;

    // Build the unpartitioned mesh and partition it into numParts parts
    boost::shared_ptr<mesh_Type> fullMeshPtr (new mesh_Type (comm) );
    regularMesh3D (*fullMeshPtr, 1, numElements, numElements, numElements,
                   false, 2.0, 2.0, 2.0, -1.0, -1.0, -1.0);

    MeshPartitioner<mesh_Type> meshPart;
    meshPart.setup (numParts, comm);
    meshPart.attachUnpartitionedMesh (fullMeshPtr);
    meshPart.doPartitionGraph();
    meshPart.doPartitionMesh();

    // Release the original mesh from the MeshPartitioner object and
    // delete the RegionMesh object
    meshPart.releaseUnpartitionedMesh();
    fullMeshPtr.reset();

    // Write the mesh parts to the HDF5 container, either through the old
    // interface (ExporterHDF5Mesh3D) or the new one (PartitionIO)
    if (! ioClass.compare ("old") )
    {
        ExporterHDF5Mesh3D<mesh_Type> HDF5Output (dataFile, meshPart.meshPartition(),
                                                  partsFileName, comm->MyPID() );
        HDF5Output.addPartitionGraph (meshPart.elementDomains(), comm);
        HDF5Output.addMeshPartitionAll (meshPart.meshPartitions(), comm);
        HDF5Output.postProcess (0);
        HDF5Output.closeFile();
    }
    else
    {
        boost::shared_ptr<Epetra_MpiComm> mpiComm =
            boost::dynamic_pointer_cast<Epetra_MpiComm> (comm);
        PartitionIO<mesh_Type> partitionIO (partsFileName, mpiComm);
        partitionIO.write (meshPart.meshPartitions() );
    }

    MPI_Finalize();
#else
    std::cout << "This test needs MPI to run. Aborting." << std::endl;
    return (EXIT_FAILURE);
#endif /* HAVE_MPI */
#else
    std::cout << "This test needs HDF5 to run. Aborting." << std::endl;
    return (EXIT_FAILURE);
#endif /* LIFEV_HAS_HDF5 */

    return (EXIT_SUCCESS);
}
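Both the partitioning program above and the solver test read their parameters from a GetPot file (by default "data", or the file passed via -f/--file). A minimal data file consistent with the keys these listings actually access might look as follows; the [solver] and [prec] sections are referenced only through setDataFromGetPot and setupPreconditioner in the first listing, so their entries are omitted here rather than guessed.

[mesh]
    nelements = 10          # elements per edge of the structured cube
[../]

[test]
    num_parts      = 4      # used by the partitioning program only
    hdf5_file_name = cube.h5
    io_class       = new    # 'old': ExporterHDF5Mesh3D; anything else: PartitionIO
[../]

[solver]
    # AztecOO parameters read by setDataFromGetPot (omitted)
[../]

[prec]
    # preconditioner parameters read by setupPreconditioner (omitted)
[../]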
int main (int argc, char** argv)
{
#ifdef LIFEV_HAS_HDF5
#ifdef HAVE_MPI
    MPI_Init (&argc, &argv);
    boost::shared_ptr<Epetra_MpiComm> comm (new Epetra_MpiComm (MPI_COMM_WORLD) );
    const bool verbose (comm->MyPID() == 0);

    // Read the data first (the GetPot data file is not used by this test)
    if (verbose) { std::cout << " -- Reading the data ... " << std::flush; }
    // GetPot dataFile ("data");
    if (verbose) { std::cout << " done ! " << std::endl; }

    GetPot cl (argc, argv);
    // partitionerType should be MeshPartitioner, MeshPartitionTool_ParMETIS or
    // MeshPartitionTool_Zoltan
    const std::string partitionerType = cl.follow ("MeshPartitioner",
                                                   "--partitioner-type");

    // Name of the HDF5 container: cube_<partitioner type>.h5
    std::string partsFile;
    partsFile.reserve (50);
    partsFile += "cube_";
    partsFile += partitionerType;
    partsFile += ".h5";

    // Load the mesh part from the HDF5 container
    boost::shared_ptr<mesh_Type> mesh;
    {
        PartitionIO<RegionMesh<LinearTetra> > partitionIO (partsFile, comm);
        partitionIO.read (mesh);
    }

    // Build the FESpaces
    if (verbose) { std::cout << " -- Building FESpaces ... " << std::flush; }
    std::string uOrder ("P1");
    std::string bOrder ("P1");
    boost::shared_ptr<FESpace<mesh_Type, MapEpetra> > uFESpace
        (new FESpace<mesh_Type, MapEpetra> (mesh, uOrder, 1, comm) );
    boost::shared_ptr<FESpace<mesh_Type, MapEpetra> > betaFESpace
        (new FESpace<mesh_Type, MapEpetra> (mesh, bOrder, 3, comm) );
    if (verbose) { std::cout << " done ! " << std::endl; }
    if (verbose) { std::cout << " ---> Dofs: " << uFESpace->dof().numTotalDof() << std::endl; }

    // Build the assembler and the matrices
    if (verbose) { std::cout << " -- Building assembler ... " << std::flush; }
    ADRAssembler<mesh_Type, matrix_Type, vector_Type> adrAssembler;
    if (verbose) { std::cout << " done! " << std::endl; }

    if (verbose) { std::cout << " -- Setting up assembler ... " << std::flush; }
    adrAssembler.setup (uFESpace, betaFESpace);
    if (verbose) { std::cout << " done! " << std::endl; }

    if (verbose) { std::cout << " -- Defining the matrix ... " << std::flush; }
    boost::shared_ptr<matrix_Type> systemMatrix (new matrix_Type (uFESpace->map() ) );
    *systemMatrix *= 0.0;
    if (verbose) { std::cout << " done! " << std::endl; }

    // Perform the assembly of the matrix
    if (verbose) { std::cout << " -- Adding the diffusion ... " << std::flush; }
    adrAssembler.addDiffusion (systemMatrix, 1);
    if (verbose) { std::cout << " done! " << std::endl; }
    if (verbose)
    {
        std::cout << " Time needed : "
                  << adrAssembler.diffusionAssemblyChrono().diffCumul() << std::endl;
    }

    if (verbose) { std::cout << " -- Closing the matrix ... " << std::flush; }
    systemMatrix->globalAssemble();
    if (verbose) { std::cout << " done ! " << std::endl; }

    // Regression check on the Frobenius norm of the matrix
    Real matrixNorm (systemMatrix->normFrobenius() );
    if (verbose) { std::cout << " ---> Frobenius norm : " << matrixNorm << std::endl; }
    if (std::fabs (matrixNorm - 35.908) > 1e-3)
    {
        std::cout << " <!> Matrix has changed !!! <!> " << std::endl;
        return EXIT_FAILURE;
    }

    if (verbose) { std::cout << "End Result: TEST PASSED" << std::endl; }
    MPI_Finalize();
#else
    std::cout << "This test needs MPI to run. Aborting." << std::endl;
    return (EXIT_FAILURE);
#endif /* HAVE_MPI */
#else
    std::cout << "This test needs HDF5 to run. Aborting." << std::endl;
    return (EXIT_FAILURE);
#endif /* LIFEV_HAS_HDF5 */

    return (EXIT_SUCCESS);
}
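The reader above derives the container name from the command line (e.g. invoked as ./test --partitioner-type MeshPartitionTool_ParMETIS, per GetPot's follow() semantics). A companion writer would have to follow the same naming convention. A hypothetical sketch of that name construction, using only the three type strings named in the comment above:

// Hypothetical sketch of the naming convention the reader expects:
// one HDF5 container per partitioner type.
const std::string partitionerTypes[] = { "MeshPartitioner",
                                         "MeshPartitionTool_ParMETIS",
                                         "MeshPartitionTool_Zoltan" };
for (UInt i (0); i < 3; ++i)
{
    std::string fileName ("cube_");
    fileName += partitionerTypes[i];
    fileName += ".h5";
    // e.g. "cube_MeshPartitioner.h5"; a writer would pass fileName to
    // PartitionIO<mesh_Type>::write(), mirroring the read() call above.
}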