int main(int argc, char *argv[]) { ApplicationsLib::LogogSetup logog_setup; TCLAP::CmdLine cmd( "Moves the mesh nodes using the given displacement vector or if no " "displacement vector is given, moves the mesh nodes such that the " "centroid of the given mesh is in the origin.\n\n" "OpenGeoSys-6 software, version " + BaseLib::BuildInfo::ogs_version + ".\n" "Copyright (c) 2012-2019, OpenGeoSys Community " "(http://www.opengeosys.org)", ' ', BaseLib::BuildInfo::ogs_version); // Define a value argument and add it to the command line. // A value arg defines a flag and a type of value that it expects, // such as "-m meshfile". TCLAP::ValueArg<std::string> mesh_arg("m","mesh","input mesh file",true,"","string"); // Add the argument mesh_arg to the CmdLine object. The CmdLine object // uses this Arg to parse the command line. cmd.add( mesh_arg ); TCLAP::ValueArg<double> x_arg("x","x","displacement in x direction", false, 0.0,"floating point number"); cmd.add(x_arg); TCLAP::ValueArg<double> y_arg("y","y","displacement in y direction", false, 0.0,"floating point number"); cmd.add(y_arg); TCLAP::ValueArg<double> z_arg("z","z","displacement in z direction", false, 0.0,"floating point number"); cmd.add(z_arg); TCLAP::ValueArg<std::string> mesh_out_arg("o","output-mesh","output mesh file", false, "", "string"); cmd.add(mesh_out_arg); cmd.parse( argc, argv ); std::string fname (mesh_arg.getValue()); std::unique_ptr<MeshLib::Mesh> mesh(MeshLib::IO::readMeshFromFile(fname)); if (!mesh) { ERR("Could not read mesh from file '%s'.", fname.c_str()); return EXIT_FAILURE; } MeshLib::Node displacement(0.0, 0.0, 0.0); if (fabs(x_arg.getValue()) < std::numeric_limits<double>::epsilon() && fabs(y_arg.getValue()) < std::numeric_limits<double>::epsilon() && fabs(z_arg.getValue()) < std::numeric_limits<double>::epsilon()) { GeoLib::AABB aabb(mesh->getNodes().begin(), mesh->getNodes().end()); displacement[0] = -(aabb.getMaxPoint()[0] + aabb.getMinPoint()[0])/2.0; displacement[1] = 
-(aabb.getMaxPoint()[1] + aabb.getMinPoint()[1])/2.0; displacement[2] = -(aabb.getMaxPoint()[2] + aabb.getMinPoint()[2])/2.0; } else { displacement[0] = x_arg.getValue(); displacement[1] = y_arg.getValue(); displacement[2] = z_arg.getValue(); } INFO("translate model (%f, %f, %f).", displacement[0], displacement[1], displacement[2]); MeshLib::moveMeshNodes( mesh->getNodes().begin(), mesh->getNodes().end(), displacement); std::string out_fname(mesh_out_arg.getValue()); if (out_fname.empty()) { out_fname = BaseLib::dropFileExtension(mesh_out_arg.getValue()); out_fname += "_displaced.vtu"; } MeshLib::IO::writeMeshToFile(*mesh, out_fname); return EXIT_SUCCESS; }
/// Convenience factory: builds a 2D mesh and fills it with a regular grid
/// spanned by the coordinate vectors x and y.
Mesh createGrid(const RVector & x, const RVector & y){
    Mesh grid2d(2);          // two-dimensional mesh
    grid2d.createGrid(x, y); // populate with the tensor-product grid
    return grid2d;
}
/// Convenience factory: builds a 1D mesh and fills it with a grid whose
/// node positions are given by x.
Mesh createMesh1D(const RVector & x){
    Mesh grid1d(1);        // one-dimensional mesh
    grid1d.create1DGrid(x);
    return grid1d;
}
// Builds a gf_view that combines the function part of `g` with the supplied
// high-frequency tail `t`.
// Throws (via details::_equal_or_throw) if the tail's shape does not match
// the target shape of `g`.
// NOTE(review): the template parameter list (Variable, Target, S, Evaluator,
// V, C) is declared outside this view of the file — presumably a template<…>
// header immediately above; verify against the full source.
gf_view<Variable, Target> make_gf_from_g_and_tail(gf_impl<Variable, Target, S, Evaluator, V, C> const &g, tail t)
{
    details::_equal_or_throw(t.shape(), get_target_shape(g));
    auto g2 = gf<Variable, Target, no_tail>{g}; // copy the function without tail
    // Assemble the view from the copied mesh/data, the new tail, and the
    // original symmetry. symmetry() is read before g2 is fully moved-from
    // only in the sense of mesh()/data(); order of evaluation of braced-init
    // list elements is left-to-right, so this is well-defined.
    return {std::move(g2.mesh()), std::move(g2.data()), std::move(t), g2.symmetry()};
}
int main(int argc, char* argv[]) { // Load the mesh. MeshSharedPtr mesh(new Mesh); MeshReaderH2D mloader; mloader.load("square.mesh", mesh); // Initial mesh refinements. for (int i = 0; i < INIT_GLOB_REF_NUM; i++) mesh->refine_all_elements(); mesh->refine_towards_boundary("Top", INIT_REF_NUM_BDY); // Initialize boundary conditions. CustomEssentialBCNonConst bc_essential({ "Bottom", "Right", "Top", "Left" }); EssentialBCs<double> bcs(&bc_essential); // Create an H1 space with default shapeset. SpaceSharedPtr<double> space(new H1Space<double>(mesh, &bcs, P_INIT)); int ndof = space->get_num_dofs(); Hermes::Mixins::Loggable::Static::info("ndof = %d.", ndof); // Zero initial solutions. This is why we use H_OFFSET. MeshFunctionSharedPtr<double> h_time_prev(new ZeroSolution<double>(mesh)); // Initialize views. ScalarView view("Initial condition", new WinGeom(0, 0, 600, 500)); view.fix_scale_width(80); // Visualize the initial condition. view.show(h_time_prev); // Initialize the constitutive relations. ConstitutiveRelations* constitutive_relations; if (constitutive_relations_type == CONSTITUTIVE_GENUCHTEN) constitutive_relations = new ConstitutiveRelationsGenuchten(ALPHA, M, N, THETA_S, THETA_R, K_S, STORATIVITY); else constitutive_relations = new ConstitutiveRelationsGardner(ALPHA, THETA_S, THETA_R, K_S); // Initialize the weak formulation. double current_time = 0; WeakFormSharedPtr<double> wf(new CustomWeakFormRichardsIE(time_step, h_time_prev, constitutive_relations)); // Initialize the FE problem. DiscreteProblem<double> dp(wf, space); // Initialize Newton solver. NewtonSolver<double> newton(&dp); newton.set_verbose_output(true); // Time stepping: int ts = 1; do { Hermes::Mixins::Loggable::Static::info("---- Time step %d, time %3.5f s", ts, current_time); // Perform Newton's iteration. 
try { newton.set_max_allowed_iterations(NEWTON_MAX_ITER); newton.solve(); } catch (Hermes::Exceptions::Exception e) { e.print_msg(); throw Hermes::Exceptions::Exception("Newton's iteration failed."); }; // Translate the resulting coefficient vector into the Solution<double> sln-> Solution<double>::vector_to_solution(newton.get_sln_vector(), space, h_time_prev); // Visualize the solution. char title[100]; sprintf(title, "Time %g s", current_time); view.set_title(title); view.show(h_time_prev); // Increase current time and time step counter. current_time += time_step; ts++; } while (current_time < T_FINAL); // Wait for the view to be closed. View::wait(); return 0; }
// Parses a Wavefront OBJ stream (v / vt / vn / f records) into
// *p_triangle_mesh.
//
// Behavior established by the code below:
//  - only triangular faces ("f a/b/c d/e/f g/h/i") are accepted;
//  - negative OBJ indices are resolved relative to the current array sizes;
//  - if normals are missing, they are generated from positions/topology;
//  - vertices referenced with more than one distinct uv are duplicated so
//    every final vertex has exactly one uv/normal.
// Returns false (with an error message containing the 1-based line number)
// on any malformed record or index.
bool ModelIO::load_obj(const string& path, std::ifstream& fin, TriangleMesh * p_triangle_mesh)
{
    char buffer[256];
    char str[256];
    float f1, f2, f3;
    int n_triangles = 0;
    int n_vertices = 0;
    vector<Point> positions;
    vector<Normal> normals;
    vector<Vec2> uvs;
    vector<int> p_index;   // per-corner position indices (3 per triangle)
    vector<int> uv_index;  // per-corner uv indices
    vector<int> n_index;   // per-corner normal indices
    char face[3][32];      // the three "v/vt/vn" corner tokens of a face line
    int line_number = 0;
    info("Loading model %s\n", path.c_str());
    // Read line by line; getline stores at most 254 chars + NUL here.
    while (!fin.getline(buffer, 255).eof()) {
        buffer[255] = '\0';
        sscanf_s(buffer, "%s", str, 255);  // first token (unused except by sscanf_s sizing)
        //info(buffer);
        line_number++;
        // vertex position: "v x y z"
        if ((buffer[0] == 'v') && (buffer[1] == ' ' || buffer[1] == 32)) {
            if (sscanf_s(buffer, "v %f %f %f", &f1, &f2, &f3) == 3) {
                positions.push_back(Point(f1, f2, f3));
                //info("%f %f %f\n", f1, f2, f3);
                n_vertices++;
            } else {
                error("vertex not in wanted format in load_obj at line: %d\n", line_number);
                return false;
            }
        }
        // texture coordinate: "vt u v"
        else if(buffer[0] == 'v' && (buffer[1] == 't')) {
            if (sscanf_s(buffer, "vt %f %f", &f1, &f2) == 2) {
                uvs.push_back(Vec2(f1, f2));
            } else {
                error("vertex not in wanted format in load_obj at line: %d\n", line_number);
                return false;
            }
        }
        // vertex normal: "vn x y z"
        else if (buffer[0] == 'v' && (buffer[1] == 'n')) {
            if (sscanf_s(buffer, "vn %f %f %f", &f1, &f2, &f3) == 3) {
                normals.push_back(Normal(f1, f2, f3));
            } else {
                error("vertex not in wanted format in load_obj at line: %d\n", line_number);
                return false;
            }
        }
        // face: "f t1 t2 t3" where each token is v[/vt[/vn]]
        else if(buffer[0] == 'f' && buffer[1] == ' ') {
            if (sscanf_s(buffer, "f %s %s %s", face, 32, face+1, 32, face+2, 32) == 3) {
                int v, u, n;
                //info("%s %s %s\n", face, face + 1, face + 2);
                for (int i = 0; i < 3; i++) {
                    // split_string extracts the (1-based) v/vt/vn indices.
                    split_string(face[i], &v, &u, &n);
                    --v, --u, --n;  // to 0-based
                    // OBJ negative indices count back from the current end.
                    if (v < 0) v = positions.size() + v + 1;
                    if (u < 0) u = uvs.size() + u + 1;
                    if (n < 0) n = normals.size() + n + 1;
                    if (v < 0 || u < 0 || n < 0) {
                        error(" face index error [load_obj] at line: %d\n", line_number);
                        return false;
                    }
                    p_index.push_back(v);
                    uv_index.push_back(u);
                    n_index.push_back(n);
                }
                n_triangles++;
            } else {
                error("vertex not in wanted format in load_obj at line: %d\n", line_number);
                return false;
            }
        }
    }
    // NOTE(review): signed/unsigned comparisons below (size_t vs int) — works
    // for realistic sizes but worth cleaning up.
    Assert(positions.size() == n_vertices);
    Assert(p_index.size() == n_triangles * 3);
    // No (or incomplete) normals in the file: synthesize smooth per-vertex
    // normals and index them by position index.
    if (n_index.size() != n_triangles * 3) {
        //error("some vertices do not have normal!\n");
        info("some vertices do not have normal. Generate normals...\n");
        ModelProcessing::Mesh mesh(positions, p_index);
        mesh.generate_normals(&normals);
        n_index.clear();
        n_index.reserve(n_triangles * 3);
        for (int i = 0; i < n_triangles * 3; ++i) {
            n_index.push_back(p_index[i]);
        }
        //return nullptr;
    }
    //if (!normals.empty()) {
    //Assert(n_index.size() == n_triangles * 3);
    if (n_index.size() != n_triangles * 3) {
        error("some vertices do not have normal!\n");
        return false;
    }
    //}
    //if (!uvs.empty()) {
    //Assert(uv_index.size() == n_triangles * 3);
    if (uv_index.size() != n_triangles * 3) {
        error("some vertices do not have uv!\n");
        return false;
    }
    //}
    info("Model loading finished. n_triangles = %d, n_vertices = %d.\n", n_triangles, n_vertices);
    // Re-index so that each final vertex carries a unique uv/normal pair:
    // the first uv seen for a position is stored in place; any corner that
    // references the same position with a *different* uv gets a duplicated
    // vertex appended at the end.
    vector<Point> new_points;
    vector<Vec2> new_uvs;
    vector<Normal> new_normals;
    vector<int> uv_id_at_p(n_vertices, -1);  // uv index claimed by each position, -1 = none yet
    new_uvs.resize(n_vertices);
    new_normals.resize(n_vertices);
    uv_id_at_p.resize(n_vertices);  // NOTE(review): redundant — already sized by the constructor
    for (int i = 0; i < n_triangles; i++) {
        int v = i * 3;  // running corner index
        for (int j = 0; j < 3; ++j) {
            int id_at_p_array = p_index[v];
            if (uv_id_at_p[id_at_p_array] == -1) {
                uv_id_at_p[id_at_p_array] = uv_index[v];
                new_uvs[id_at_p_array] = uvs[uv_index[v]];
                new_normals[id_at_p_array] = normals[n_index[v]];
            } else if (uv_id_at_p[id_at_p_array] != uv_index[v]) {
                // this vertex has other uv already
                int new_id = (int)positions.size();
                positions.push_back(positions[p_index[v]]); // add a new vertex
                new_uvs.push_back(uvs[uv_index[v]]);
                new_normals.push_back(normals[n_index[v]]);
                p_index[v] = new_id;
            }
            v++;
        }
    }
    // NOTE(review): "%d" paired with size_t arguments — assumes 32-bit-compatible
    // sizes; confirm or switch to %zu.
    info("Model processing finished. Add %d new vertices. %d vertices in all\n", positions.size() - n_vertices, positions.size());
    n_vertices = (int)positions.size();
    //return new TriangleMesh(&Transform::identity, &Transform::identity, false, n_triangles, n_vertices,
    //    &p_index[0], &positions[0], &new_normals[0], nullptr, &new_uvs[0], nullptr);
    p_triangle_mesh->_set_data(n_triangles, n_vertices, &p_index[0], &positions[0], &new_normals[0], nullptr, &new_uvs[0]);
    return true;
}
// CHILD landscape-evolution model driver — "Geoarchaeology" special version.
// Reads an input file named on the command line, builds the mesh/network/
// erosion/uplift objects, then runs the storm-by-storm main loop, treating
// the main channel's profile as a prescribed (unerodible) boundary condition
// whose inlet elevation varies in time according to GA_OPTWAVE.
// NOTE(review): implicit int return type (pre-standard C++) and no return
// statement — kept as-is for this legacy code base.
main( int argc, char **argv )
{
   int silent_mode,          // Option for silent mode (no time output to stdout)
       optDetachLim,         // Option for detachment-limited erosion only
       optMeander,           // Option for stream meandering
       optFloodplainDep,     // Option for floodplain (overbank) deposition
       optLoessDep,          // Option for eolian deposition
       optDiffuseDepo;       // Option for deposition / no deposition by diff'n
   tStreamMeander *meander;  // -> meander object (only set/used if optMeander)
   tFloodplain *floodplain;  // -> floodplain object (only set/used if optFloodplainDep)
   tEolian *loess;           // -> eolian deposition object (only set/used if optLoessDep)
   ofstream oefile;          // outlet-elevation log (optwave 1 and 2 only)

   /****************** INITIALIZATION *************************************\
   **  ALGORITHM
   **    Get command-line arguments (name of input file + any other opts)
   **    Set silent_mode flag
   **    Open main input file
   **    Create and initialize objects for...
   **      Mesh
   **      Output files
   **      Storm
   **      Stream network
   **      Erosion
   **      Uplift (or baselevel change)
   **      Run timer
   **    Write output for initial state
   **    Get options for erosion type, meandering, etc.
   \**********************************************************************/

   // Check command-line arguments
   if( argc<2 )
   {
      cerr << "Usage: " << argv[0] << " <input file>" << endl;
      ReportFatalError( "You need to give the name of an input file." );
   }

   // Check whether we're in silent mode
   silent_mode = ( argc>2 && argv[2][1]=='s' );

   // Say hello
   cout << "\nThis is CHILD, version " << VERSION << endl
        << "Geoarchaeology special version 1.0" << endl << endl;

   // Open main input file
   tInputFile inputFile( argv[1] );

   // Create and initialize objects:
   cout << "Creating mesh...\n";
   tMesh<tLNode> mesh( inputFile );
   cout << "Creating output files...\n";
   tLOutput<tLNode> output( &mesh, inputFile );
   tStorm storm( inputFile );
   cout << "Creating stream network...\n";
   tStreamNet strmNet( mesh, storm, inputFile );
   tErosion erosion( &mesh, inputFile );
   tUplift uplift( inputFile );
   cout << "Writing data for time zero...\n";
   tRunTimer time( inputFile, !silent_mode );
   output.WriteOutput( 0 );
   cout << "Initialization done.\n";

   // Get various options
   optDetachLim = inputFile.ReadItem( optDetachLim, "OPTDETACHLIM" );
   optMeander = inputFile.ReadItem( optMeander, "OPTMNDR" );
   optDiffuseDepo = inputFile.ReadItem( optDiffuseDepo, "OPTDIFFDEP" );
   optFloodplainDep = inputFile.ReadItem( optFloodplainDep, "OPTFLOODPLAIN" );
   optLoessDep = inputFile.ReadItem( optLoessDep, "OPTLOESSDEP" );

   // If applicable, create stream meander object
   if( optMeander )
       meander = new tStreamMeander( strmNet, mesh, inputFile );

   // If applicable, create floodplain object
   if( optFloodplainDep )
       floodplain = new tFloodplain( inputFile, &mesh );

   // If applicable, create eolian deposition object
   if( optLoessDep )
       loess = new tEolian( inputFile );

   // For Geoarchaeology special application
   double kr = inputFile.ReadItem( kr, "KR" );                // bedrock erodibility
   double drop = inputFile.ReadItem( drop, "GA_VALDROP" );    // valley drop (inlet to outlet)
   double inletElev = inputFile.ReadItem( inletElev, "GA_INLETELEV" );
   double meanInletElev = inletElev;  // NOTE(review): never read afterwards
   double period = inputFile.ReadItem( period, "GA_PERIOD" );
   int optwave = inputFile.ReadItem( optwave, "GA_OPTWAVE" ); // 0=sine, 1=sawtooth+noise, 2=tabulated
   double amplitude = inputFile.ReadItem( amplitude, "GA_AMPLITUDE" );
   double tpeak, mr, mf, ttime, oldar1, noise0, noise1;
   int numpts; //number of points in the floodplain curve
   int fpindex=0;//where are you in the floodplain data
   double fpslope;//slope of floodplain curve
   double chanslp;//slope of channel, read in if optwave==2
   tArray<double> fpht;
   tArray<double> fptime;

   if( optwave==0 )
       period = 2.0 * PI / period;  // convert period to angular frequency
   else if( optwave==1 )
   {
      tpeak = inputFile.ReadItem( tpeak, "GA_TPEAK" );
      if( tpeak<=0.0 || tpeak>=1.0 )
          ReportFatalError("GA_TPEAK must be between 0 and 1 (not inclusive");
      tpeak = tpeak*period;          // fractional peak time -> absolute
      mr = amplitude/tpeak;          // rising-limb slope
      mf = amplitude/(period-tpeak); // falling-limb slope
      oldar1=0;
      noise0=0;
      noise1=0;
      oefile.open("Geoarch/outletelev");
   }
   else if( optwave==2){
      // Tabulated floodplain-height history read from numbered input items
      // FLDPLNTIME1..n / FLDPLNHT1..n.
      numpts=inputFile.ReadItem( numpts, "NUMFLDPLNPTS" );
      fpht.setSize( numpts );
      fptime.setSize( numpts );
      int i=0;
      char add='1';
      char add2='0';
      char name[30];
      double help;
      double inittime;
      chanslp=inputFile.ReadItem(chanslp, "CHANSLOPE" );
      cout<<"channel slope is "<<chanslp<<endl;
      inittime=inputFile.ReadItem(inittime, "INPUTTIME" );
      while (i<numpts){
         if(i<9){
            // NOTE(review): strcat(name, &add) treats a single char as a C
            // string; it only works if the byte following 'add' on the stack
            // happens to be 0 — fragile, verify on the target compiler.
            strcpy(name, "FLDPLNTIME" );
            strcat(name, &add );
            help=inputFile.ReadItem(help,name);
            fptime[i]=help+inittime;
            cout<<"index "<<i<<" fldplntime "<<fptime[i];
            strcpy(name, "FLDPLNHT" );
            strcat(name, &add );
            help=inputFile.ReadItem(help,name);
            fpht[i]=help;
            cout<<" fldplnht "<<fpht[i]<<endl;
            i++;
            add++;
         }
         if(i>=9){
            // Two-digit item numbers (10, 11, ...): first digit fixed at '1'.
            // NOTE(review): unlike the i<9 branch, inittime is NOT added to
            // fptime here — confirm whether that is intentional.
            add='1';
            strcpy(name, "FLDPLNTIME" );
            strcat(name, &add );
            strcat(name, &add2 );
            help=inputFile.ReadItem(help,name);
            fptime[i]=help;
            cout<<"index "<<i<<" fldplntime "<<fptime[i];
            strcpy(name, "FLDPLNHT" );
            strcat(name, &add );
            strcat(name, &add2 );
            help=inputFile.ReadItem(help,name);
            fpht[i]=help;
            cout<<" fldplnht "<<fpht[i]<<endl;
            i++;
            add2++;
         }
      }
      // Initial segment slope of the tabulated curve.
      fpslope=(fpht[fpindex+1]-fpht[fpindex])/(fptime[fpindex+1]-fptime[fpindex]);
      oefile.open("Terraces/outletelev");
   }

   int numg = inputFile.ReadItem( numg, "NUMGRNSIZE" );
   //if( numg<2 ) ReportFatalError("Must use at least 2 sizes with GA." );
   tArray<double> deparr( numg );  // per-grain-size erosion/deposition amounts
   int i;
   for( i=0; i<numg; i++ )
       deparr[i] = 0.0;
   assert( strmNet.getInletNodePtr() != 0 );

   /**************** MAIN LOOP ******************************************\
   **  ALGORITHM
   **    Generate storm
   **    Do storm...
   **      Update network (flow directions, drainage area, runoff)
   **      Water erosion/deposition (vertical)
   **      Meandering (if applicable)
   **      Floodplain deposition (if applicable)
   **    Do interstorm...
   **      Hillslope transport
   **      Eolian (loess) deposition (if applicable)
   **      Uplift (or baselevel change)
   **********************************************************************/
   while( !time.IsFinished() )
   {
      time.ReportTimeStatus();

      // Do storm...
      storm.GenerateStorm( time.getCurrentTime(), strmNet.getInfilt(), strmNet.getSoilStore() );
      //cout << storm.getRainrate() << " " << storm.getStormDuration() << " "
      //     << storm.interstormDur() << endl;
      //cin >> dbg;
      strmNet.UpdateNet( time.getCurrentTime(), storm );

      // Addition for Geoarchaeology model: set erodibility of main
      // stream to zero and set its profile elevations as a boundary
      // condition
      tMeshListIter<tLNode> nodeIter( mesh.getNodeList() );
      tLNode *cn;
      double elev, totlen;

      // Start by resetting erodibility for all nodes; will be overridden
      // to zero for main channel nodes
      for( cn=nodeIter.FirstP(); nodeIter.IsActive(); cn=nodeIter.NextP() )
          cn->setLayerErody( 0, kr );

      // Set the new drop elevation
      if( optwave==0 )
      {
         // Sinusoidal inlet elevation history.
         inletElev = drop + amplitude * sin( period*time.getCurrentTime() );
         //cout << "Inlet " << inletElev << " at " << time.getCurrentTime() << endl;
      }
      else if( optwave==1 )
      {
         // Sawtooth (rise to tpeak, then fall) plus AR(1)-style noise.
         noise1=(0.99*noise0+drand48()-0.5)*0.9;
         ttime = fmod( time.getCurrentTime(), period );
         if( ttime<=tpeak )
             inletElev = drop + mr*ttime + noise1;
         else
             inletElev = drop + amplitude - mf*(ttime-tpeak) + noise1;
         noise0=noise1;
         oefile<<noise1<<endl;
      }
      else if( optwave==2 )
      {
         // Piecewise-linear interpolation of the tabulated curve.
         if(time.getCurrentTime()>=fptime[fpindex] && time.getCurrentTime()<fptime[fpindex+1]){
            //slope and index don't need to be changed, just calculate elev
            inletElev = fpht[fpindex] + fpslope*(time.getCurrentTime()-fptime[fpindex]);
         }
         else{
            //calculate new slope and update index
            fpindex++;
            fpslope=(fpht[fpindex+1]-fpht[fpindex])/(fptime[fpindex+1]-fptime[fpindex]);
            inletElev = fpht[fpindex] + fpslope*(time.getCurrentTime()-fptime[fpindex]);
         }
         cout<<"fhpt = "<<fpht[fpindex]<<" fpslope "<<fpslope<<" current time "<<time.getCurrentTime()<<" fptime "<<fptime[fpindex]<<endl;
         oefile<<inletElev<<endl;
      }

      // Find the total length of the main channel and compute slope
      if( optwave<2){
         cn = strmNet.getInletNodePtr();
         totlen = 0.0;
         do
         {
            cn->setLayerErody( 0, 0.0 ); // Main channel elev is a B.C., thus unerodible
            totlen += cn->getFlowEdg()->getLength();
            cn = cn->getDownstrmNbr();
         } while( cn->getBoundaryFlag()==kNonBoundary );
         chanslp = drop/totlen;
      }

      // Now set elevations along main channel
      elev = inletElev;                 // starting at elevation at inlet
      cn = strmNet.getInletNodePtr();   // begin at inlet
      cn->setZ( inletElev );            // set inlet's elev
      do // work downstream along main channel, setting elevations
      {
         double delz;
         elev = elev - chanslp * cn->getFlowEdg()->getLength();
         cn = cn->getDownstrmNbr();
         delz = elev - cn->getZ();
         // Lower in 0.1-thick increments so each EroDep call stays within
         // one active layer.
         while( delz < -0.1 )
         { // Test: erode one active layer thick at a tm
            deparr[0] = -0.1;
            cn->EroDep( 0, deparr, time.getCurrentTime() );
            delz += 0.1;
         }
         deparr[0] = delz;
         cn->EroDep( 0, deparr, time.getCurrentTime() );
      } while( cn->getBoundaryFlag()==kNonBoundary );

      //cout << "eroding...\n";
      if( optDetachLim )
          erosion.ErodeDetachLim( storm.getStormDuration() );
      else
          erosion.DetachErode( storm.getStormDuration(), &strmNet, time.getCurrentTime() );

      //cout << "meandering...\n";
      if( optMeander )
          meander->Migrate( time.getCurrentTime() );

      //cout << "overbanking...\n";
      if( optFloodplainDep )
          floodplain->DepositOverbank( storm.getRainrate(), storm.getStormDuration(), time.getCurrentTime() );

      // Do interstorm...
      //cout << "Doing diffusion\n";
      erosion.Diffuse( storm.getStormDuration() + storm.interstormDur(), optDiffuseDepo );

      //cout << "exposure time...\n";
      erosion.UpdateExposureTime( storm.getStormDuration() + storm.interstormDur() );

      if( optLoessDep )
          loess->DepositLoess( &mesh, storm.getStormDuration()+storm.interstormDur(), time.getCurrentTime() );

      //cout << "Uplift\n";
      if( time.getCurrentTime() < uplift.getDuration() )
          uplift.DoUplift( &mesh, storm.getStormDuration() + storm.interstormDur() );

      time.Advance( storm.getStormDuration() + storm.interstormDur() );

      //cout << "Output\n";
      if( time.CheckOutputTime() )
          output.WriteOutput( time.getCurrentTime() );
   }
}
// Runs the Voronoi-based medial-axis transform on the current mesh:
// computes the Voronoi diagram of the vertices, finds the poles, derives
// medial spoke angles/radii, and (optionally) embeds vertices at their
// medial positions. Optionally overlays the input points and colorizes
// the resulting per-vertex radius or angle property.
void filter::applyFilter(RichParameterSet* pars){
    /// Draw the input vertices if overlay was required
    if(pars->getBool(overlayInput)){
        Vector3VertexProperty points = mesh()->get_vertex_property<Vector3>(VPOINT);
        foreach(Vertex v, mesh()->vertices())
            drawArea()->drawPoint(points[v],1,Qt::red);
    }

    bool isEmbed = pars->getBool(embedVertices);

    // We need normals (face first, then vertex — vertex normals are derived).
    mesh()->update_face_normals();
    mesh()->update_vertex_normals();

    // Timed medial-axis pipeline; the call order below is significant.
    QElapsedTimer timer;
    timer.start();
    VoronoiHelper h(mesh(), drawArea());
    h.computeVoronoiDiagram();
    h.searchVoronoiPoles();
    h.getMedialSpokeAngleAndRadii();
    h.setToMedial(isEmbed);
    qDebug() << "[VOROMAT]" << timer.elapsed() << "ms";

    /// Colorize one of the exposed properties
    if( pars->getBool(colorizeRadii) || pars->getBool(colorizeAngle) ){
        drawArea()->setRenderer(mesh(),"Smooth");
        std::string propname;
        // If both flags are set, angle wins (assigned last).
        if( pars->getBool(colorizeRadii) ) propname = VRADII;
        if( pars->getBool(colorizeAngle) ) propname = VANGLE;
        ColorizeHelper(mesh(),unsignedColorMap).vscalar_to_vcolor(propname);
        // qDebug() << ScalarStatisticsHelper(mesh).statistics(propname);
    }

#if 0
    /// Test of lambda medial axis (disabled)
    drawArea()->deleteAllRenderObjects();
    Vector3VertexProperty vpoles = mesh()->get_vertex_property<Vector3>(VPOLE);
    ScalarVertexProperty vangles = mesh()->get_vertex_property<Scalar>(VANGLE);
    SurfaceMeshModel* model = new SurfaceMeshModel("", "cloud");
    qDebug() << StatisticsHelper<Scalar>(mesh()).statistics(VANGLE);
    document()->addModel(model);
    drawArea()->setRenderer(mesh(),"Transparent");
    drawArea()->setRenderer(model, "Vertices as Dots");
    foreach(Vertex v, mesh()->vertices()){
        if(vangles[v] > 3.1415*.4)
            drawArea()->drawPoint(vpoles[v]);
    }
#endif
}
// libMesh driver for an (inverse) contaminant-transport system:
// builds a 2D or 3D structured mesh over a fixed UTM-like coordinate box,
// sets up a Newton-based steady solve, performs max_r_steps rounds of
// solve + error-estimate + adaptive refine, then one final solve on the
// refined mesh and (if ExodusII is available) writes output.
int main(int argc, char** argv){
  //initialize libMesh
  LibMeshInit init(argc, argv);

  //parameters
  GetPot infile("fem_system_params.in");
  const bool transient = infile("transient", true);
  const Real deltat = infile("deltat", 0.005);
  unsigned int n_timesteps = infile("n_timesteps", 20);
  const int nx = infile("nx",100);
  const int ny = infile("ny",100);
  const int nz = infile("nz",100);
  //const unsigned int dim = 3;
  const unsigned int max_r_steps = infile("max_r_steps", 3);
  const unsigned int max_r_level = infile("max_r_level", 3);
  const Real refine_percentage = infile("refine_percentage", 0.1);
  const Real coarsen_percentage = infile("coarsen_percentage", 0.0);
  const std::string indicator_type = infile("indicator_type", "kelly");
  const bool write_error = infile("write_error",false);
  const bool flag_by_elem_frac = infile("flag_by_elem_frac",true);
#ifdef LIBMESH_HAVE_EXODUS_API
  const unsigned int write_interval = infile("write_interval", 5);
#endif

  // Create a mesh, with dimension to be overridden later, distributed
  // across the default MPI communicator.
  Mesh mesh(init.comm());

  //create mesh: nz == 0 selects a 2D (QUAD9) square, otherwise a 3D (HEX27) cube
  unsigned int dim;
  if(nz == 0){
    //to check if oscillations happen in 2D as well...
    dim = 2;
    MeshTools::Generation::build_square(mesh, nx, ny, 497150.0, 501750.0, 537350.0, 540650.0, QUAD9);
  }else{
    dim = 3;
    MeshTools::Generation::build_cube(mesh, nx, ny, nz, 497150.0, 501750.0, 537350.0, 540650.0, 0.0, 100.0, HEX27);
  }

  // Print information about the mesh to the screen.
  mesh.print_info();

  // Create an equation systems object.
  EquationSystems equation_systems (mesh);

  //name system
  ContamTransSysInv & system = equation_systems.add_system<ContamTransSysInv>("ContamTransInv");

  //solve as steady or transient
  // NOTE(review): both branches currently install a SteadySolver — true
  // transience is explicitly not implemented yet (see message below).
  if(transient){
    //system.time_solver = AutoPtr<TimeSolver>(new EulerSolver(system)); //backward Euler
    system.time_solver = AutoPtr<TimeSolver>(new SteadySolver(system));
    std::cout << "\n\nAaahhh transience not yet available!\n" << std::endl;
    n_timesteps = 1;
  }
  else{
    system.time_solver = AutoPtr<TimeSolver>(new SteadySolver(system));
    libmesh_assert_equal_to (n_timesteps, 1); //this doesn't seem to work?
  }

  // Initialize the system
  equation_systems.init ();

  //initial conditions
  read_initial_parameters();
  system.project_solution(initial_value, initial_grad, equation_systems.parameters);
  finish_initialization();

  // Set the time stepping options...
  system.deltat = deltat;

  //...and the nonlinear solver options...
  // (ownership of the raw pointer is transferred to the AutoPtr below)
  NewtonSolver *solver = new NewtonSolver(system);
  system.time_solver->diff_solver() = AutoPtr<DiffSolver>(solver);
  solver->quiet = infile("solver_quiet", true);
  solver->verbose = !solver->quiet;
  solver->max_nonlinear_iterations = infile("max_nonlinear_iterations", 15);
  solver->relative_step_tolerance = infile("relative_step_tolerance", 1.e-3);
  solver->relative_residual_tolerance = infile("relative_residual_tolerance", 0.0);
  solver->absolute_residual_tolerance = infile("absolute_residual_tolerance", 0.0);

  // And the linear solver options
  solver->max_linear_iterations = infile("max_linear_iterations", 10000);
  solver->initial_linear_tolerance = infile("initial_linear_tolerance",1.e-13);
  solver->minimum_linear_tolerance = infile("minimum_linear_tolerance",1.e-13);
  solver->linear_tolerance_multiplier = infile("linear_tolerance_multiplier",1.e-3);

  // Mesh Refinement object - to test effect of constant refined mesh (not refined at every timestep)
  MeshRefinement mesh_refinement(mesh);
  mesh_refinement.refine_fraction() = refine_percentage;
  mesh_refinement.coarsen_fraction() = coarsen_percentage;
  mesh_refinement.max_h_level() = max_r_level;

  // Print information about the system to the screen.
  equation_systems.print_info();

  ExodusII_IO exodusIO = ExodusII_IO(mesh); //for writing multiple timesteps to one file

  // Solve/refine cycle: each round solves all timesteps, estimates the
  // error, and adapts the mesh.
  for (unsigned int r_step=0; r_step<max_r_steps; r_step++)
  {
    std::cout << "\nBeginning Solve " << r_step+1 << std::endl;

    for (unsigned int t_step=0; t_step != n_timesteps; ++t_step)
    {
      std::cout << "\n\nSolving time step " << t_step << ", time = " << system.time << std::endl;
      system.solve();
      system.postprocess();
      // Advance to the next timestep in a transient problem
      system.time_solver->advance_timestep();
    } //end stepping through time loop

    std::cout << "\n Refining the mesh..." << std::endl;

    // The \p ErrorVector is a particular \p StatisticsVector
    // for computing error information on a finite element mesh.
    ErrorVector error;
    if (indicator_type == "patch")
    {
      // The patch recovery estimator should give a
      // good estimate of the solution interpolation
      // error.
      PatchRecoveryErrorEstimator error_estimator;
      error_estimator.set_patch_reuse(false); //anisotropy trips up reuse
      error_estimator.estimate_error (system, error);
    }
    else if (indicator_type == "kelly")
    {
      // The Kelly error estimator is based on
      // an error bound for the Poisson problem
      // on linear elements, but is useful for
      // driving adaptive refinement in many problems
      KellyErrorEstimator error_estimator;
      error_estimator.estimate_error (system, error);
    }

    // Write out the error distribution
    if(write_error){
      std::ostringstream ss;
      ss << r_step;
#ifdef LIBMESH_HAVE_EXODUS_API
      std::string error_output = "error_"+ss.str()+".e";
#else
      std::string error_output = "error_"+ss.str()+".gmv";
#endif
      error.plot_error( error_output, mesh );
    }

    // This takes the error in \p error and decides which elements
    // will be coarsened or refined.
    if(flag_by_elem_frac)
      mesh_refinement.flag_elements_by_elem_fraction(error);
    else
      mesh_refinement.flag_elements_by_error_fraction (error);

    // This call actually refines and coarsens the flagged
    // elements.
    mesh_refinement.refine_and_coarsen_elements();

    // This call reinitializes the \p EquationSystems object for
    // the newly refined mesh. One of the steps in the
    // reinitialization is projecting the \p solution,
    // \p old_solution, etc... vectors from the old mesh to
    // the current one.
    equation_systems.reinit ();

    std::cout << "System has: " << equation_systems.n_active_dofs() << " degrees of freedom." << std::endl;
  } //end refinement loop

  //use that final refinement
  for (unsigned int t_step=0; t_step != n_timesteps; ++t_step)
  {
    std::cout << "\n\nSolving time step " << t_step << ", time = " << system.time << std::endl;
    system.solve();
    system.postprocess();
    Number QoI_computed = system.get_QoI_value("computed", 0);
    std::cout<< "Computed QoI is " << std::setprecision(17) << QoI_computed << std::endl;
    // Advance to the next timestep in a transient problem
    system.time_solver->advance_timestep();
  } //end stepping through time loop

#ifdef LIBMESH_HAVE_EXODUS_API
  for (unsigned int t_step=0; t_step != n_timesteps; ++t_step)
  {
    // Write out this timestep if we're requested to
    if ((t_step+1)%write_interval == 0)
    {
      std::ostringstream ex_file_name;
      std::ostringstream tplot_file_name;

      // We write the file in the ExodusII format.
      //ex_file_name << "out_"
      //             << std::setw(3)
      //             << std::setfill('0')
      //             << std::right
      //             << t_step+1
      //             << ".e";
      tplot_file_name << "out_" << std::setw(3) << std::setfill('0') << std::right << t_step+1 << ".plt";
      //ExodusII_IO(mesh).write_timestep(ex_file_name.str(),
      //                                 equation_systems,
      //                                 1, /* This number indicates how many time steps
      //                                       are being written to the file */
      //                                 system.time);
      exodusIO.write_timestep("output.exo", equation_systems, t_step+1, system.time); //outputs all timesteps in one file
      TecplotIO(mesh).write_equation_systems(tplot_file_name.str(), equation_systems);
    }
  }
#endif // #ifdef LIBMESH_HAVE_EXODUS_API

  // All done.
  return 0;
} //end main
// Returns the asset-loader-relative path of the attached mesh,
// or an empty string when no mesh is set.
std::string reMeshRenderable::meshFileName() const {
    if (!_mesh) {
        return "";
    }
    return reRadial::shared()->assetLoader()->relativePath(mesh()->path());
}
// Selects the render effect: the skinning effect for skinned meshes,
// otherwise the default effect.
int reMeshRenderable::getEffect() {
    if (mesh()->hasSkin) {
        return reEFFECT_SKIN;
    }
    return reEFFECT_DEFAULT;
}
// Releases this renderable's shared reference to its mesh asset.
reMeshRenderable::~reMeshRenderable() {
    auto* loader = reRadial::shared()->assetLoader();
    loader->releaseShared(mesh());
}
// Converts a raster (array of cell values `img` described by `header`) into
// a mesh of the requested element type.
// Depending on intensity_type the pixel values are either
//  - ELEVATION: written into the z-coordinate of the element nodes
//    (2D element types only), or
//  - used as a cell property named `array_name` (int for "MaterialIDs",
//    double otherwise).
// Cells matching header.no_data are removed from the result.
// Returns nullptr on invalid parameter combinations; otherwise a newly
// allocated mesh owned by the caller.
MeshLib::Mesh* RasterToMesh::convert(
    double const*const img,
    GeoLib::RasterHeader const& header,
    MeshElemType elem_type,
    UseIntensityAs intensity_type,
    std::string const& array_name)
{
    // Parameter validation: supported element types only.
    if ((elem_type != MeshElemType::TRIANGLE) &&
        (elem_type != MeshElemType::QUAD) &&
        (elem_type != MeshElemType::HEXAHEDRON) &&
        (elem_type != MeshElemType::PRISM))
    {
        ERR("Invalid Mesh Element Type.");
        return nullptr;
    }

    // 2D element types require a single-layer raster.
    if (((elem_type == MeshElemType::TRIANGLE) ||
         (elem_type == MeshElemType::QUAD)) &&
        header.n_depth != 1)
    {
        ERR("Triangle or Quad elements cannot be used to construct meshes from 3D rasters.");
        return nullptr;
    }

    // Elevation mapping only makes sense for 2D element types.
    if (intensity_type == UseIntensityAs::ELEVATION &&
        ((elem_type == MeshElemType::PRISM) ||
         (elem_type == MeshElemType::HEXAHEDRON)))
    {
        ERR("Elevation mapping can only be performed for 2D meshes.");
        return nullptr;
    }

    // Shift the origin by half a cell so that raster cell centers coincide
    // with element centers.
    MathLib::Point3d mesh_origin(std::array<double, 3>{
        {header.origin[0] - (header.cell_size / 2.0),
         header.origin[1] - (header.cell_size / 2.0), header.origin[2]}});

    // Generate the regular base mesh of the requested element type.
    std::unique_ptr<MeshLib::Mesh> mesh (nullptr);
    if (elem_type == MeshElemType::TRIANGLE)
    {
        mesh.reset(MeshLib::MeshGenerator::generateRegularTriMesh(
            header.n_cols, header.n_rows, header.cell_size, mesh_origin,
            "RasterDataMesh"));
    }
    else if (elem_type == MeshElemType::QUAD)
    {
        mesh.reset(MeshLib::MeshGenerator::generateRegularQuadMesh(
            header.n_cols, header.n_rows, header.cell_size, mesh_origin,
            "RasterDataMesh"));
    }
    else if (elem_type == MeshElemType::PRISM)
    {
        mesh.reset(MeshLib::MeshGenerator::generateRegularPrismMesh(
            header.n_cols, header.n_rows, header.n_depth, header.cell_size,
            mesh_origin, "RasterDataMesh"));
    }
    else if (elem_type == MeshElemType::HEXAHEDRON)
    {
        mesh.reset(MeshLib::MeshGenerator::generateRegularHexMesh(
            header.n_cols, header.n_rows, header.n_depth, header.cell_size,
            mesh_origin, "RasterDataMesh"));
    }

    MeshLib::Mesh* new_mesh(nullptr);
    std::vector<std::size_t> elements_to_remove;
    if (intensity_type == UseIntensityAs::ELEVATION)
    {
        std::vector<MeshLib::Node*> const& nodes(mesh->getNodes());
        std::vector<MeshLib::Element*> const& elems(mesh->getElements());
        std::size_t const n_nodes(elems[0]->getNumberOfNodes());
        // Tri and prism meshes have two elements per raster cell, so the
        // raster-cell -> element-index mapping is scaled by m.
        bool const double_idx = (elem_type == MeshElemType::TRIANGLE) ||
                                (elem_type == MeshElemType::PRISM);
        std::size_t const m = (double_idx) ? 2 : 1;
        for (std::size_t k = 0; k < header.n_depth; k++)
        {
            std::size_t const layer_idx = (k*header.n_rows*header.n_cols);
            for (std::size_t i = 0; i < header.n_cols; i++)
            {
                std::size_t const idx(i * header.n_rows + layer_idx);
                for (std::size_t j = 0; j < header.n_rows; j++)
                {
                    double const val(img[idx + j]);
                    // NOTE(review): exact float == comparison against
                    // no_data — assumes no_data is stored bit-exactly in
                    // the raster; verify for non-integer sentinels.
                    if (val == header.no_data)
                    {
                        // no-data cells are collected for removal
                        elements_to_remove.push_back(m * (idx + j));
                        if (double_idx)
                        {
                            elements_to_remove.push_back(m * (idx + j) + 1);
                        }
                        continue;
                    }
                    // Write the pixel value into the z-coordinate of every
                    // node of the element(s) belonging to this cell.
                    for (std::size_t n = 0; n < n_nodes; ++n)
                    {
                        (*(nodes[elems[m * (idx + j)]->getNodeIndex(n)]))[2] = val;
                        if (double_idx)
                        {
                            (*(nodes[elems[m * (idx + j) + 1]->getNodeIndex(
                                n)]))[2] = val;
                        }
                    }
                }
            }
        }
    }
    else
    {
        // Store pixel values as a cell property and flag no-data cells.
        MeshLib::Properties &properties = mesh->getProperties();
        MeshLib::ElementSearch ex(*mesh);
        if (array_name == "MaterialIDs")
        {
            auto* const prop_vec = properties.createNewPropertyVector<int>(
                array_name, MeshLib::MeshItemType::Cell, 1);
            fillPropertyVector<int>(*prop_vec, img, header, elem_type);
            ex.searchByPropertyValue<int>(array_name,
                                          static_cast<int>(header.no_data));
        }
        else
        {
            auto* const prop_vec = properties.createNewPropertyVector<double>(
                array_name, MeshLib::MeshItemType::Cell, 1);
            fillPropertyVector<double>(*prop_vec, img, header, elem_type);
            ex.searchByPropertyValue<double>(array_name, header.no_data);
        }
        elements_to_remove = ex.getSearchedElementIDs();
    }

    // Strip no-data elements, or hand over the full mesh unchanged.
    if (!elements_to_remove.empty())
    {
        // NOTE(review): if removeElements can return nullptr (e.g. when all
        // elements are removed), the removePropertyVector call below would
        // dereference it — confirm against the MeshLib API.
        new_mesh = MeshLib::removeElements(*mesh, elements_to_remove,
                                           mesh->getName());
    }
    else
    {
        new_mesh = mesh.release();
    }

    // The property vector was only needed for the no-data search.
    if (intensity_type == UseIntensityAs::NONE)
    {
        new_mesh->getProperties().removePropertyVector(array_name);
    }
    return new_mesh;
}
int main(int argc, char** argv) { LibMeshInit init(argc, argv); GetPot cl(argc, argv); int dim = -1; if (!cl.search("--dim")) { std::cerr << "No --dim argument found!" << std::endl; usage_error(argv[0]); } dim = cl.next(dim); Mesh mesh(dim); if(!cl.search("--input")) { std::cerr << "No --input argument found!" << std::endl; usage_error(argv[0]); } const char* meshname = cl.next("mesh.xda"); mesh.read(meshname); std::cout << "Loaded mesh " << meshname << std::endl; if(!cl.search("--newbcid")) { std::cerr << "No --bcid argument found!" << std::endl; usage_error(argv[0]); } boundary_id_type bcid = 0; bcid = cl.next(bcid); Point minnormal(-std::numeric_limits<Real>::max(), -std::numeric_limits<Real>::max(), -std::numeric_limits<Real>::max()); Point maxnormal(std::numeric_limits<Real>::max(), std::numeric_limits<Real>::max(), std::numeric_limits<Real>::max()); Point minpoint(-std::numeric_limits<Real>::max(), -std::numeric_limits<Real>::max(), -std::numeric_limits<Real>::max()); Point maxpoint(std::numeric_limits<Real>::max(), std::numeric_limits<Real>::max(), std::numeric_limits<Real>::max()); if (cl.search("--minnormalx")) minnormal(0) = cl.next(minnormal(0)); if (cl.search("--minnormalx")) minnormal(0) = cl.next(minnormal(0)); if (cl.search("--maxnormalx")) maxnormal(0) = cl.next(maxnormal(0)); if (cl.search("--minnormaly")) minnormal(1) = cl.next(minnormal(1)); if (cl.search("--maxnormaly")) maxnormal(1) = cl.next(maxnormal(1)); if (cl.search("--minnormalz")) minnormal(2) = cl.next(minnormal(2)); if (cl.search("--maxnormalz")) maxnormal(2) = cl.next(maxnormal(2)); if (cl.search("--minpointx")) minpoint(0) = cl.next(minpoint(0)); if (cl.search("--maxpointx")) maxpoint(0) = cl.next(maxpoint(0)); if (cl.search("--minpointy")) minpoint(1) = cl.next(minpoint(1)); if (cl.search("--maxpointy")) maxpoint(1) = cl.next(maxpoint(1)); if (cl.search("--minpointz")) minpoint(2) = cl.next(minpoint(2)); if (cl.search("--maxpointz")) maxpoint(2) = cl.next(maxpoint(2)); std::cout << 
"min point = " << minpoint << std::endl; std::cout << "max point = " << maxpoint << std::endl; std::cout << "min normal = " << minnormal << std::endl; std::cout << "max normal = " << maxnormal << std::endl; bool matcholdbcid = false; boundary_id_type oldbcid = 0; if (cl.search("--oldbcid")) { matcholdbcid = true; oldbcid = cl.next(oldbcid); if (oldbcid < 0) oldbcid = BoundaryInfo::invalid_id; } AutoPtr<FEBase> fe = FEBase::build(dim, FEType(FIRST,LAGRANGE)); QGauss qface(dim-1, CONSTANT); fe->attach_quadrature_rule(&qface); const std::vector<Point> &face_points = fe->get_xyz(); const std::vector<Point> &face_normals = fe->get_normals(); MeshBase::element_iterator el = mesh.elements_begin(); const MeshBase::element_iterator end_el = mesh.elements_end(); for (; el != end_el; ++el) { Elem *elem = *el; unsigned int n_sides = elem->n_sides(); for (unsigned int s=0; s != n_sides; ++s) { if (elem->neighbor(s)) continue; fe->reinit(elem,s); const Point &p = face_points[0]; const Point &n = face_normals[0]; //std::cout << "elem = " << elem->id() << std::endl; //std::cout << "centroid = " << elem->centroid() << std::endl; //std::cout << "p = " << p << std::endl; //std::cout << "n = " << n << std::endl; if (p(0) > minpoint(0) && p(0) < maxpoint(0) && p(1) > minpoint(1) && p(1) < maxpoint(1) && p(2) > minpoint(2) && p(2) < maxpoint(2) && n(0) > minnormal(0) && n(0) < maxnormal(0) && n(1) > minnormal(1) && n(1) < maxnormal(1) && n(2) > minnormal(2) && n(2) < maxnormal(2)) { if (matcholdbcid && mesh.boundary_info->boundary_id(elem, s) != oldbcid) continue; mesh.boundary_info->remove_side(elem, s); mesh.boundary_info->add_side(elem, s, bcid); //std::cout << "Set element " << elem->id() << " side " << s << // " to boundary " << bcid << std::endl; } } } std::string outputname; if(cl.search("--output")) { outputname = cl.next("mesh.xda"); } else { outputname = "new."; outputname += meshname; } mesh.write(outputname.c_str()); std::cout << "Wrote mesh " << outputname << std::endl; 
return 0; }
// The main program: solves a transient heat problem forward in time, then
// integrates the adjoint problem backward in time, and finally accumulates
// perturbed residuals to approximate the sensitivity of the QoI to a
// parameter. The result is checked against a hard-coded reference value.
int main (int argc, char** argv)
{
  // Skip adaptive examples on a non-adaptive libMesh build
#ifndef LIBMESH_ENABLE_AMR
  libmesh_example_requires(false, "--enable-amr");
#else
  // Skip this 2D example if libMesh was compiled as 1D-only.
  libmesh_example_requires(2 <= LIBMESH_DIM, "2D support");

  // Initialize libMesh.
  LibMeshInit init (argc, argv);

  std::cout << "Started " << argv[0] << std::endl;

  // Make sure the general input file exists, and parse it
  {
    std::ifstream i("general.in");
    if (!i)
      libmesh_error_msg('[' << init.comm().rank() << "] Can't find general.in; exiting early.");
  }
  GetPot infile("general.in");

  // Read in parameters from the input file
  FEMParameters param(init.comm());
  param.read(infile);

  // Create a mesh with the given dimension, distributed
  // across the default MPI communicator.
  Mesh mesh(init.comm(), param.dimension);

  // And an object to refine it
  UniquePtr<MeshRefinement> mesh_refinement(new MeshRefinement(mesh));

  // And an EquationSystems to run on it
  EquationSystems equation_systems (mesh);

  std::cout << "Building mesh" << std::endl;

  // Build a unit square; "unstructured" also falls back to triangles here.
  ElemType elemtype;

  if (param.elementtype == "tri" ||
      param.elementtype == "unstructured")
    elemtype = TRI3;
  else
    elemtype = QUAD4;

  MeshTools::Generation::build_square (mesh, param.coarsegridx, param.coarsegridy,
                                       param.domain_xmin, param.domain_xmin + param.domain_edge_width,
                                       param.domain_ymin, param.domain_ymin + param.domain_edge_length,
                                       elemtype);

  std::cout << "Building system" << std::endl;

  HeatSystem &system = equation_systems.add_system<HeatSystem> ("HeatSystem");

  set_system_parameters(system, param);

  std::cout << "Initializing systems" << std::endl;

  // Initialize the system
  equation_systems.init ();

  // Refine the grid again if requested
  for (unsigned int i=0; i != param.extrarefinements; ++i)
    {
      mesh_refinement->uniformly_refine(1);
      equation_systems.reinit();
    }

  std::cout<<"Setting primal initial conditions"<<std::endl;

  read_initial_parameters();

  system.project_solution(initial_value, initial_grad,
                          equation_systems.parameters);

  // Output the H1 norm of the initial conditions
  libMesh::out << "|U(" <<system.time<< ")|= " << system.calculate_norm(*system.solution, 0, H1) << std::endl<<std::endl;

  // Add an adjoint vector, this will be computed after the forward
  // time stepping is complete
  //
  // Tell the library not to save adjoint solutions during the forward
  // solve
  //
  // Tell the library not to project this vector, and hence, memory
  // solution history to not save it.
  //
  // Make this vector ghosted so we can localize it to each element
  // later.
  const std::string & adjoint_solution_name = "adjoint_solution0";
  system.add_vector("adjoint_solution0", false, GHOSTED);

  // Close up any resources initial.C needed
  finish_initialization();

  // Plot the initial conditions
  write_output(equation_systems, 0, "primal");

  // Print information about the mesh and system to the screen.
  mesh.print_info();
  equation_systems.print_info();

  // In optimized mode we catch any solver errors, so that we can
  // write the proper footers before closing.  In debug mode we just
  // let the exception throw so that gdb can grab it.
#ifdef NDEBUG
  try
    {
#endif
      // Now we begin the timestep loop to compute the time-accurate
      // solution of the equations.
      for (unsigned int t_step=param.initial_timestep;
           t_step != param.initial_timestep + param.n_timesteps; ++t_step)
        {
          // A pretty update message
          std::cout << " Solving time step " << t_step << ", time = "
                    << system.time << std::endl;

          // Solve the forward problem at time t, to obtain the solution at time t + dt
          system.solve();

          // Output the H1 norm of the computed solution
          libMesh::out << "|U(" <<system.time + system.deltat<< ")|= " << system.calculate_norm(*system.solution, 0, H1) << std::endl;

          // Advance to the next timestep in a transient problem
          std::cout<<"Advancing timestep"<<std::endl<<std::endl;
          system.time_solver->advance_timestep();

          // Write out this timestep
          write_output(equation_systems, t_step+1, "primal");
        }
      // End timestep loop

      ///////////////// Now for the Adjoint Solution //////////////////////////////////////

      // Now we will solve the backwards in time adjoint problem
      std::cout << std::endl << "Solving the adjoint problem" << std::endl;

      // We need to tell the library that it needs to project the adjoint, so
      // MemorySolutionHistory knows it has to save it

      // Tell the library to project the adjoint vector, and hence, memory solution history to
      // save it
      system.set_vector_preservation(adjoint_solution_name, true);

      std::cout << "Setting adjoint initial conditions Z("<<system.time<<")"<<std::endl;

      // Need to call adjoint_advance_timestep once for the initial condition setup
      std::cout<<"Retrieving solutions at time t="<<system.time<<std::endl;
      system.time_solver->adjoint_advance_timestep();

      // Output the H1 norm of the retrieved solutions (u^i and u^i+1)
      libMesh::out << "|U(" <<system.time + system.deltat<< ")|= " << system.calculate_norm(*system.solution, 0, H1) << std::endl;

      libMesh::out << "|U(" <<system.time<< ")|= " << system.calculate_norm(system.get_vector("_old_nonlinear_solution"), 0, H1) << std::endl;

      // The first thing we have to do is to apply the adjoint initial
      // condition. The user should supply these. Here they are specified
      // in the functions adjoint_initial_value and adjoint_initial_gradient
      system.project_vector(adjoint_initial_value, adjoint_initial_grad, equation_systems.parameters, system.get_adjoint_solution(0));

      // Since we have specified an adjoint solution for the current time (T), set the adjoint_already_solved boolean to true, so we dont solve unneccesarily in the adjoint sensitivity method
      system.set_adjoint_already_solved(true);

      libMesh::out << "|Z(" <<system.time<< ")|= " << system.calculate_norm(system.get_adjoint_solution(), 0, H1) << std::endl<<std::endl;

      write_output(equation_systems, param.n_timesteps, "dual");

      // Now that the adjoint initial condition is set, we will start the
      // backwards in time adjoint integration

      // For loop stepping backwards in time
      for (unsigned int t_step=param.initial_timestep;
           t_step != param.initial_timestep + param.n_timesteps; ++t_step)
        {
          //A pretty update message
          std::cout << " Solving adjoint time step " << t_step << ", time = "
                    << system.time << std::endl;

          // The adjoint_advance_timestep
          // function calls the retrieve function of the memory_solution_history
          // class via the memory_solution_history object we declared earlier.
          // The retrieve function sets the system primal vectors to their values
          // at the current timestep
          std::cout<<"Retrieving solutions at time t="<<system.time<<std::endl;
          system.time_solver->adjoint_advance_timestep();

          // Output the H1 norm of the retrieved solution
          libMesh::out << "|U(" <<system.time + system.deltat << ")|= " << system.calculate_norm(*system.solution, 0, H1) << std::endl;

          libMesh::out << "|U(" <<system.time<< ")|= " << system.calculate_norm(system.get_vector("_old_nonlinear_solution"), 0, H1) << std::endl;

          // The adjoint still has to be solved for this (earlier) time level.
          system.set_adjoint_already_solved(false);

          system.adjoint_solve();

          // Now that we have solved the adjoint, set the adjoint_already_solved boolean to true, so we dont solve unneccesarily in the error estimator
          system.set_adjoint_already_solved(true);

          libMesh::out << "|Z(" <<system.time<< ")|= "<< system.calculate_norm(system.get_adjoint_solution(), 0, H1) << std::endl << std::endl;

          // Get a pointer to the primal solution vector
          NumericVector<Number> &primal_solution = *system.solution;

          // Get a pointer to the solution vector of the adjoint problem for QoI 0
          NumericVector<Number> &dual_solution_0 = system.get_adjoint_solution(0);

          // Swap the primal and dual solutions so we can write out the adjoint solution
          primal_solution.swap(dual_solution_0);

          write_output(equation_systems, param.n_timesteps - (t_step + 1), "dual");

          // Swap back
          primal_solution.swap(dual_solution_0);
        }
      // End adjoint timestep loop

      // Now that we have computed both the primal and adjoint solutions, we compute the sensitivties to the parameter p
      // dQ/dp = partialQ/partialp - partialR/partialp
      // partialQ/partialp = (Q(p+dp) - Q(p-dp))/(2*dp), this is not supported by the library yet
      // partialR/partialp = (R(u,z;p+dp) - R(u,z;p-dp))/(2*dp), where
      // R(u,z;p+dp) = int_{0}^{T} f(z;p+dp) - <partialu/partialt, z>(p+dp) - <g(u),z>(p+dp)
      // To do this we need to step forward in time, and compute the perturbed R at each time step and accumulate it
      // Then once all time steps are over, we can compute (R(u,z;p+dp) - R(u,z;p-dp))/(2*dp)

      // Now we begin the timestep loop to compute the time-accurate
      // adjoint sensitivities
      for (unsigned int t_step=param.initial_timestep;
           t_step != param.initial_timestep + param.n_timesteps; ++t_step)
        {
          // A pretty update message
          std::cout << "Retrieving " << t_step << ", time = "
                    << system.time << std::endl;

          // Retrieve the primal and adjoint solutions at the current timestep
          system.time_solver->retrieve_timestep();

          libMesh::out << "|U(" <<system.time + system.deltat << ")|= " << system.calculate_norm(*system.solution, 0, H1) << std::endl;

          libMesh::out << "|U(" <<system.time<< ")|= " << system.calculate_norm(system.get_vector("_old_nonlinear_solution"), 0, H1) << std::endl;

          libMesh::out << "|Z(" <<system.time<< ")|= "<< system.calculate_norm(system.get_adjoint_solution(0), 0, H1) << std::endl << std::endl;

          // Call the postprocess function which we have overloaded to compute
          // accumulate the perturbed residuals
          (dynamic_cast<HeatSystem&>(system)).perturb_accumulate_residuals(dynamic_cast<HeatSystem&>(system).get_parameter_vector());

          // Move the system time forward (retrieve_timestep does not do this)
          system.time += system.deltat;
        }

      // A pretty update message
      std::cout << "Retrieving " << " final time = "
                << system.time << std::endl;

      // Retrieve the primal and adjoint solutions at the current timestep
      system.time_solver->retrieve_timestep();

      libMesh::out << "|U(" <<system.time + system.deltat << ")|= " << system.calculate_norm(*system.solution, 0, H1) << std::endl;

      libMesh::out << "|U(" <<system.time<< ")|= " << system.calculate_norm(system.get_vector("_old_nonlinear_solution"), 0, H1) << std::endl;

      libMesh::out << "|Z(" <<system.time<< ")|= "<< system.calculate_norm(system.get_adjoint_solution(0), 0, H1) << std::endl<<std::endl;

      // Call the postprocess function which we have overloaded to compute
      // accumulate the perturbed residuals
      (dynamic_cast<HeatSystem&>(system)).perturb_accumulate_residuals(dynamic_cast<HeatSystem&>(system).get_parameter_vector());

      // Now that we computed the accumulated, perturbed residuals, we can compute the
      // approximate sensitivity
      Number sensitivity_0_0 = (dynamic_cast<HeatSystem&>(system)).compute_final_sensitivity();

      // Print it out
      std::cout<<"Sensitivity of QoI 0 w.r.t parameter 0 is: " << sensitivity_0_0 << std::endl;

      // Hard coded assert to ensure that the actual numbers we are
      // getting are what they should be
      // The 2e-4 tolerance is chosen to ensure success even with
      // 32-bit floats
      libmesh_assert_less(std::abs(sensitivity_0_0 - (-5.37173)), 2.e-4);

#ifdef NDEBUG
    }
  catch (...)
    {
      std::cerr << '[' << mesh.processor_id()
                << "] Caught exception; exiting early." << std::endl;
    }
#endif

  std::cerr << '[' << mesh.processor_id()
            << "] Completing output." << std::endl;

  // All done.
  return 0;

#endif // LIBMESH_ENABLE_AMR
}
neiGlobal.insert ( pointI, calcFaceCells ( isValidBFace, mesh().pointFaces()[pointI], pointGlobals ) ); } syncTools::syncPointMap ( mesh(), neiGlobal, unionEqOp(), false // apply separation ); } // Calculates per cell the neighbour data (= cell or boundary in global // numbering). First element is always cell itself! void Foam::CPCCellToCellStencil::calcCellStencil ( labelListList& globalCellCells ) const { // Calculate points on coupled patches
// Iteratively refines the given cells until either every requested cell has
// been refined or no further progress can be made. Each pass computes cell
// cuts, applies them through polyTopoChange, updates all stored cell labels
// to the new numbering, and re-queues the cells that could not be cut yet.
// Returns the map from master cell to added cell (in final numbering).
Foam::Map<Foam::label> Foam::refinementIterator::setRefinement
(
    const List<refineCell>& refCells
)
{
    Map<label> addedCells(2*refCells.size());

    // Non-const access needed so the time can be incremented between
    // intermediate mesh writes.
    Time& runTime = const_cast<Time&>(mesh_.time());

    label nRefCells = refCells.size();

    label oldRefCells = -1;

    // Operate on copy.
    List<refineCell> currentRefCells(refCells);

    bool stop = false;

    do
    {
        if (writeMesh_)
        {
            // Need different times to write meshes.
            runTime++;
        }

        polyTopoChange meshMod(mesh_);

        if (debug)
        {
            Pout<< "refinementIterator : refining "
                << currentRefCells.size() << " cells." << endl;
        }

        // Determine cut pattern.
        cellCuts cuts(mesh_, cellWalker_, currentRefCells);

        label nCuts = cuts.nLoops();
        // Global count across all processors.
        reduce(nCuts, sumOp<label>());

        if (nCuts == 0)
        {
            // Nothing left that can be cut anywhere: dump diagnostics in
            // debug mode and terminate the iteration.
            if (debug)
            {
                Pout<< "refinementIterator : exiting iteration since no valid"
                    << " loops found for " << currentRefCells.size()
                    << " cells" << endl;

                fileName cutsFile("failedCuts_" + runTime.timeName() + ".obj");

                Pout<< "Writing cuts for time " << runTime.timeName()
                    << " to " << cutsFile << endl;

                OFstream cutsStream(cutsFile);

                labelList refCells(currentRefCells.size());
                forAll(currentRefCells, i)
                {
                    refCells[i] = currentRefCells[i].cellNo();
                }
                meshTools::writeOBJ
                (
                    cutsStream,
                    mesh().cells(),
                    mesh().faces(),
                    mesh().points(),
                    refCells
                );
            }

            break;
        }

        if (debug)
        {
            fileName cutsFile("cuts_" + runTime.timeName() + ".obj");

            Pout<< "Writing cuts for time " << runTime.timeName()
                << " to " << cutsFile << endl;

            OFstream cutsStream(cutsFile);
            cuts.writeOBJ(cutsStream);
        }

        // Insert mesh refinement into polyTopoChange.
        meshRefiner_.setRefinement(cuts, meshMod);

        //
        // Do all changes
        //

        autoPtr<mapPolyMesh> morphMap = meshMod.changeMesh
        (
            mesh_,
            false
        );

        // Move mesh (since morphing does not do this)
        if (morphMap().hasMotionPoints())
        {
            mesh_.movePoints(morphMap().preMotionPoints());
        }

        // Update stored refinement pattern
        meshRefiner_.updateMesh(morphMap());

        // Write resulting mesh
        if (writeMesh_)
        {
            if (debug)
            {
                Pout<< "Writing refined polyMesh to time "
                    << runTime.timeName() << endl;
            }

            mesh_.write();
        }

        // Update currentRefCells for new cell numbers. Use helper function
        // in meshCutter class.
        updateLabels
        (
            morphMap->reverseCellMap(),
            currentRefCells
        );

        // Update addedCells for new cell numbers
        updateLabels
        (
            morphMap->reverseCellMap(),
            addedCells
        );

        // Get all added cells from cellCutter (already in new numbering
        // from meshRefiner.updateMesh call) and add to global list of added
        const Map<label>& addedNow = meshRefiner_.addedCells();

        for
        (
            Map<label>::const_iterator iter = addedNow.begin();
            iter != addedNow.end();
            ++iter
        )
        {
            // A master cell must only ever be refined once.
            if (!addedCells.insert(iter.key(), iter()))
            {
                FatalErrorIn("refinementIterator")
                    << "Master cell " << iter.key()
                    << " already has been refined" << endl
                    << "Added cell:" << iter() << abort(FatalError);
            }
        }

        // Get failed refinement in new cell numbering and reconstruct input
        // to the meshRefiner. Is done by removing all refined cells from
        // current list of cells to refine.

        // Update refCells for new cell numbers.
        updateLabels
        (
            morphMap->reverseCellMap(),
            currentRefCells
        );

        // Pack refCells acc. to refined status
        nRefCells = 0;

        forAll(currentRefCells, refI)
        {
            const refineCell& refCell = currentRefCells[refI];

            // Keep only cells that were not refined in this pass.
            if (!addedNow.found(refCell.cellNo()))
            {
                if (nRefCells != refI)
                {
                    currentRefCells[nRefCells++] =
                        refineCell
                        (
                            refCell.cellNo(),
                            refCell.direction()
                        );
                }
            }
        }

        oldRefCells = currentRefCells.size();

        currentRefCells.setSize(nRefCells);

        if (debug)
        {
            Pout<< endl;
        }

        // Stop only if all finished or all can't refine any further.
        stop = (nRefCells == 0) || (nRefCells == oldRefCells);

        // Only stop when every processor is done (or stuck).
        reduce(stop, andOp<bool>());
    }
    while (!stop);

    if (nRefCells == oldRefCells)
    {
        WarningIn("refinementIterator")
            << "stopped refining."
            << "Did not manage to refine a single cell" << endl
            << "Wanted :" << oldRefCells << endl;
    }

    return addedCells;
}
// miniFE driver: builds the (virtual) mesh, generates the matrix structure,
// assembles the FE linear system, imposes Dirichlet BCs, localizes indices,
// then either times the individual kernels (MINIFE_KERNELS != 0) or runs a
// CG solve, and finally records timings/flops in the YAML report.
// Returns the verification result (0 unless solution verification fails).
int driver(const Box& global_box, Box& my_box,
           Parameters& params, YAML_Doc& ydoc)
{
  // Global problem dimensions (upper bounds of the global box).
  int global_nx = global_box[0][1];
  int global_ny = global_box[1][1];
  int global_nz = global_box[2][1];

  int numprocs = 1, myproc = 0;
#ifdef HAVE_MPI
  MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
  MPI_Comm_rank(MPI_COMM_WORLD, &myproc);
#endif

  // Optionally skew the per-process workload, then report the imbalance.
  if (params.load_imbalance > 0) {
    add_imbalance<GlobalOrdinal>(global_box, my_box, params.load_imbalance, ydoc);
  }

  float largest_imbalance = 0, std_dev = 0;
  compute_imbalance<GlobalOrdinal>(global_box, my_box, largest_imbalance,
                                   std_dev, ydoc, true);

  //Create a representation of the mesh:
  //Note that 'simple_mesh_description' is a virtual or conceptual
  //mesh that doesn't actually store mesh data.

  if (myproc==0) {
    std::cout.width(30);
    std::cout << "creating/filling mesh...";
    std::cout.flush();
  }

  timer_type t_start = mytimer();
  timer_type t0 = mytimer();

  simple_mesh_description<GlobalOrdinal> mesh(global_box, my_box);

  timer_type mesh_fill = mytimer() - t0;
  timer_type t_total = mytimer() - t_start;

  if (myproc==0) {
    std::cout << mesh_fill << "s, total time: " << t_total << std::endl;
  }

  //next we will generate the matrix structure.

  //Declare matrix object:

#if defined(MINIFE_ELL_MATRIX)
  typedef ELLMatrix<Scalar,LocalOrdinal,GlobalOrdinal> MatrixType;
#else
  typedef CSRMatrix<Scalar,LocalOrdinal,GlobalOrdinal> MatrixType;
#endif

  MatrixType A;

  timer_type gen_structure;
  RUN_TIMED_FUNCTION("generating matrix structure...",
                     generate_matrix_structure(mesh, A),
                     gen_structure, t_total);

  // Rows are globally contiguous per process; -1 marks "no local rows".
  GlobalOrdinal local_nrows = A.rows.size();
  GlobalOrdinal my_first_row = local_nrows > 0 ? A.rows[0] : -1;

  Vector<Scalar,LocalOrdinal,GlobalOrdinal> b(my_first_row, local_nrows);
  Vector<Scalar,LocalOrdinal,GlobalOrdinal> x(my_first_row, local_nrows);

  //Assemble finite-element sub-matrices and sub-vectors into the global
  //linear system:

  timer_type fe_assembly;
  RUN_TIMED_FUNCTION("assembling FE data...",
                     assemble_FE_data(mesh, A, b, params),
                     fe_assembly, t_total);

  if (myproc == 0) {
    ydoc.add("Matrix structure generation","");
    ydoc.get("Matrix structure generation")->add("Mat-struc-gen Time",gen_structure);
    ydoc.add("FE assembly","");
    ydoc.get("FE assembly")->add("FE assembly Time",fe_assembly);
  }

#ifdef MINIFE_DEBUG
  write_matrix("A_prebc.mtx", A);
  write_vector("b_prebc.vec", b);
#endif

  //Now apply dirichlet boundary-conditions
  //(Apply the 0-valued surfaces first, then the 1-valued surface last.)

  timer_type dirbc_time;
  RUN_TIMED_FUNCTION("imposing Dirichlet BC...",
                     impose_dirichlet(0.0, A, b, global_nx+1, global_ny+1,
                                      global_nz+1, mesh.bc_rows_0),
                     dirbc_time, t_total);
  RUN_TIMED_FUNCTION("imposing Dirichlet BC...",
                     impose_dirichlet(1.0, A, b, global_nx+1, global_ny+1,
                                      global_nz+1, mesh.bc_rows_1),
                     dirbc_time, t_total);

#ifdef MINIFE_DEBUG
  write_matrix("A.mtx", A);
  write_vector("b.vec", b);
#endif

  //Transform global indices to local, set up communication information:

  timer_type make_local_time;
  RUN_TIMED_FUNCTION("making matrix indices local...",
                     make_local_matrix(A),
                     make_local_time, t_total);

#ifdef MINIFE_DEBUG
  write_matrix("A_local.mtx", A);
  write_vector("b_local.vec", b);
#endif

  size_t global_nnz = compute_matrix_stats(A, myproc, numprocs, ydoc);

  //Prepare to perform conjugate gradient solve:

  LocalOrdinal max_iters = 200;
  LocalOrdinal num_iters = 0;
  typedef typename TypeTraits<Scalar>::magnitude_type magnitude;
  magnitude rnorm = 0;
  magnitude tol = std::numeric_limits<magnitude>::epsilon();

  timer_type cg_times[NUM_TIMERS];

  typedef Vector<Scalar,LocalOrdinal,GlobalOrdinal> VectorType;

  t_total = mytimer() - t_start;

  bool matvec_with_comm_overlap = params.mv_overlap_comm_comp==1;

  int verify_result = 0;

#if MINIFE_KERNELS != 0
  // Kernel-timing mode: no CG solve, just time matvec/dot/waxpy loops.
  if (myproc==0) {
    std::cout.width(30);
    std::cout << "Starting kernel timing loops ..." << std::endl;
  }

  max_iters = 500;
  // Non-trivial initial x so the kernels do real work.
  x.coefs[0] = 0.9;
  if (matvec_with_comm_overlap) {
    time_kernels(A, b, x, matvec_overlap<MatrixType,VectorType>(), max_iters, rnorm, cg_times);
  }
  else {
    time_kernels(A, b, x, matvec_std<MatrixType,VectorType>(), max_iters, rnorm, cg_times);
  }
  num_iters = max_iters;
  std::string title("Kernel timings");
#else
  if (myproc==0) {
    std::cout << "Starting CG solver ... " << std::endl;
  }

  if (matvec_with_comm_overlap) {
#ifdef MINIFE_CSR_MATRIX
    // Overlapped comm/comp needs local/external row partitioning, which is
    // only implemented for the CSR format.
    rearrange_matrix_local_external(A);
    cg_solve(A, b, x, matvec_overlap<MatrixType,VectorType>(), max_iters, tol,
             num_iters, rnorm, cg_times);
#else
    std::cout << "ERROR, matvec with overlapping comm/comp only works with CSR matrix."<<std::endl;
#endif
  }
  else {
    cg_solve(A, b, x, matvec_std<MatrixType,VectorType>(), max_iters, tol,
             num_iters, rnorm, cg_times);
    if (myproc == 0) {
      std::cout << "Final Resid Norm: " << rnorm << std::endl;
    }

    if (params.verify_solution > 0) {
      double tolerance = 0.06;
      bool verify_whole_domain = false;
#ifdef MINIFE_DEBUG
      verify_whole_domain = true;
#endif
      if (myproc == 0) {
        if (verify_whole_domain) std::cout << "verifying solution..." << std::endl;
        else std::cout << "verifying solution at ~ (0.5, 0.5, 0.5) ..." << std::endl;
      }
      verify_result = verify_solution(mesh, x, tolerance, verify_whole_domain);
    }
  }

#ifdef MINIFE_DEBUG
  write_vector("x.vec", x);
#endif
  std::string title("CG solve");
#endif

  if (myproc == 0) {
    ydoc.get("Global Run Parameters")->add("ScalarType",TypeTraits<Scalar>::name());
    ydoc.get("Global Run Parameters")->add("GlobalOrdinalType",TypeTraits<GlobalOrdinal>::name());
    ydoc.get("Global Run Parameters")->add("LocalOrdinalType",TypeTraits<LocalOrdinal>::name());
    ydoc.add(title,"");
    ydoc.get(title)->add("Iterations",num_iters);
    ydoc.get(title)->add("Final Resid Norm",rnorm);

    GlobalOrdinal global_nrows = global_nx;
    global_nrows *= global_ny*global_nz;

    //flops-per-mv, flops-per-dot, flops-per-waxpy:
    double mv_flops = global_nnz*2.0;
    double dot_flops = global_nrows*2.0;
    double waxpy_flops = global_nrows*3.0;

#if MINIFE_KERNELS == 0
//if MINIFE_KERNELS == 0 then we did a CG solve, and in that case
//there were num_iters+1 matvecs, num_iters*2 dots, and num_iters*3+2 waxpys.
    mv_flops *= (num_iters+1);
    dot_flops *= (2*num_iters);
    waxpy_flops *= (3*num_iters+2);
#else
//if MINIFE_KERNELS then we did one of each operation per iteration.
    mv_flops *= num_iters;
    dot_flops *= num_iters;
    waxpy_flops *= num_iters;
#endif

    double total_flops = mv_flops + dot_flops + waxpy_flops;

    // Rates are only reported when the measured time is large enough to be
    // meaningful; otherwise "inf" is recorded.
    double mv_mflops = -1;
    if (cg_times[MATVEC] > 1.e-4)
      mv_mflops = 1.e-6 * (mv_flops/cg_times[MATVEC]);

    double dot_mflops = -1;
    if (cg_times[DOT] > 1.e-4)
      dot_mflops = 1.e-6 * (dot_flops/cg_times[DOT]);

    double waxpy_mflops = -1;
    if (cg_times[WAXPY] > 1.e-4)
      waxpy_mflops = 1.e-6 *  (waxpy_flops/cg_times[WAXPY]);

    double total_mflops = -1;
    if (cg_times[TOTAL] > 1.e-4)
      total_mflops = 1.e-6 * (total_flops/cg_times[TOTAL]);

    ydoc.get(title)->add("WAXPY Time",cg_times[WAXPY]);
    ydoc.get(title)->add("WAXPY Flops",waxpy_flops);
    if (waxpy_mflops >= 0)
      ydoc.get(title)->add("WAXPY Mflops",waxpy_mflops);
    else
      ydoc.get(title)->add("WAXPY Mflops","inf");

    ydoc.get(title)->add("DOT Time",cg_times[DOT]);
    ydoc.get(title)->add("DOT Flops",dot_flops);
    if (dot_mflops >= 0)
      ydoc.get(title)->add("DOT Mflops",dot_mflops);
    else
      ydoc.get(title)->add("DOT Mflops","inf");

    ydoc.get(title)->add("MATVEC Time",cg_times[MATVEC]);
    ydoc.get(title)->add("MATVEC Flops",mv_flops);
    if (mv_mflops >= 0)
      ydoc.get(title)->add("MATVEC Mflops",mv_mflops);
    else
      ydoc.get(title)->add("MATVEC Mflops","inf");

#ifdef MINIFE_FUSED
    ydoc.get(title)->add("MATVECDOT Time",cg_times[MATVECDOT]);
    ydoc.get(title)->add("MATVECDOT Flops",mv_flops);
    if (mv_mflops >= 0)
      ydoc.get(title)->add("MATVECDOT Mflops",mv_mflops);
    else
      ydoc.get(title)->add("MATVECDOT Mflops","inf");
#endif

#if MINIFE_KERNELS == 0
    ydoc.get(title)->add("Total","");
    ydoc.get(title)->get("Total")->add("Total CG Time",cg_times[TOTAL]);
    ydoc.get(title)->get("Total")->add("Total CG Flops",total_flops);
    if (total_mflops >= 0)
      ydoc.get(title)->get("Total")->add("Total CG Mflops",total_mflops);
    else
      ydoc.get(title)->get("Total")->add("Total CG Mflops","inf");
    ydoc.get(title)->add("Time per iteration",cg_times[TOTAL]/num_iters);
#endif
  }

  return verify_result;
}
/// Evaluates the divergence of the attached field on a single mesh element.
/// @param e index of the element to evaluate on
/// @return array of divergence values computed by the element
numArray<Scalar> DivergenceOperator::element(size_t e) const
{
    numArray<Scalar> divValues;
    // The element fills divValues from the field values restricted to it.
    mesh()[e].div(m_field.element(e), divValues);
    return divValues;
}
// Extracts geometry, UVs, shader assignments and materials from the single
// selected Maya mesh into data_. Fails (via FAILURE) when the selection is
// not exactly one triangulatable, hole-free mesh.
// Returns MS::kSuccess on success; FAILURE is assumed to abort/return early.
MStatus ScbWriter::dumpData()
{
    MStatus status;
    MDagPath dag_path;
    MDagPath mesh_dag_path;
    MFnSkinCluster fn_skin_cluster;

    // Require exactly one selected mesh.
    MSelectionList selection_list;
    if (MStatus::kSuccess != MGlobal::getActiveSelectionList(selection_list))
        FAILURE("ScbWriter: MGlobal::getActiveSelectionList()");
    MItSelectionList it_selection_list(selection_list, MFn::kMesh, &status);
    if (status != MStatus::kSuccess)
        FAILURE("ScbWriter: it_selection_list()");
    if (it_selection_list.isDone())
        FAILURE("ScbWriter: no mesh selected!");
    it_selection_list.getDagPath(mesh_dag_path);
    it_selection_list.next();
    if (!it_selection_list.isDone())
        FAILURE("ScbWriter: more than one mesh selected!");

    MFnMesh mesh(mesh_dag_path);
    strcpy_s(data_.name, ScbData::kNameLen, mesh.name().asChar());

    // get shaders
    int instance_num = 0;
    if (mesh_dag_path.isInstanced())
        instance_num = mesh_dag_path.instanceNumber();
    MObjectArray shaders;
    MIntArray shader_indices;
    mesh.getConnectedShaders(instance_num, shaders, shader_indices);
    int shader_count = shaders.length();
    MGlobal::displayInfo(MString("shaders for this mesh : ") + shader_count);

    // check for holes
    MIntArray hole_info_array;
    MIntArray hole_vertex_array;
    mesh.getHoles(hole_info_array, hole_vertex_array);
    if (hole_info_array.length() != 0)
        FAILURE("ScbWriter: mesh contains holes");

    // check for triangulation
    MItMeshPolygon mesh_polygon_iter(mesh_dag_path);
    for (mesh_polygon_iter.reset(); !mesh_polygon_iter.isDone();
         mesh_polygon_iter.next())
    {
        if (!mesh_polygon_iter.hasValidTriangulation())
            FAILURE("ScbWriter: a poly has no valid triangulation");
    }

    // get transform
    MFnTransform transform_node(mesh.parent(0));
    MVector pos = transform_node.getTranslation(MSpace::kTransform);

    // get vertices
    int num_vertices = mesh.numVertices();
    data_.num_vtxs = num_vertices;
    MFloatPointArray vertex_array;
    mesh.getPoints(vertex_array, MSpace::kWorld);
    // kObject is done by removing translation values
    // that way scales and rotations are kept.
    for (int i = 0; i < num_vertices; i++)
    {
        ScbVtx vtx;
        vtx.x = vertex_array[i].x - static_cast<float>(pos.x);
        vtx.y = vertex_array[i].y - static_cast<float>(pos.y);
        vtx.z = vertex_array[i].z - static_cast<float>(pos.z);
        // Track the bounding box; i == 0 seeds both min and max.
        // check for min
        if (vtx.x < data_.bbx || i == 0) data_.bbx = vtx.x;
        if (vtx.y < data_.bby || i == 0) data_.bby = vtx.y;
        if (vtx.z < data_.bbz || i == 0) data_.bbz = vtx.z;
        // check for max
        if (vtx.x > data_.bbdx || i == 0) data_.bbdx = vtx.x;
        if (vtx.y > data_.bbdy || i == 0) data_.bbdy = vtx.y;
        if (vtx.z > data_.bbdz || i == 0) data_.bbdz = vtx.z;
        data_.vertices.push_back(vtx);
    }
    // bbd is the size so :
    data_.bbdx -= data_.bbx;
    data_.bbdy -= data_.bby;
    data_.bbdz -= data_.bbz;

    // get UVs
    MFloatArray u_array;
    MFloatArray v_array;
    mesh.getUVs(u_array, v_array);
    MIntArray uv_counts;
    MIntArray uv_ids;
    mesh.getAssignedUVs(uv_counts, uv_ids);

    // get triangles
    MIntArray triangle_counts;
    MIntArray triangle_vertices;
    mesh.getTriangles(triangle_counts, triangle_vertices);
    int numPolys = mesh.numPolygons();

    // fill data: per-triangle shader index plus per-corner index and UVs.
    // V is flipped (1 - v) to match the target format's texture origin.
    int cur = 0;
    for (int i = 0; i < numPolys; i++)
    {
        int triangle_count = triangle_counts[i];
        int shader = shader_indices[i];
        for (int j = 0; j < triangle_count; j++)
            data_.shader_per_triangle.push_back(shader);
        for (int j = 0; j < triangle_count * 3; j++)
        {
            data_.indices.push_back(triangle_vertices[cur]);
            data_.u_vec.push_back(u_array[uv_ids[cur]]);
            data_.v_vec.push_back(1 - v_array[uv_ids[cur]]);
            cur++;
        }
    }
    data_.num_indices = cur;

    // fill materials
    for (int i = 0; i < shader_count; i++)
    {
        ScbMaterial material;
        // get the plug for SurfaceShader
        MPlug shader_plug =
            MFnDependencyNode(shaders[i]).findPlug("surfaceShader");
        // get the connections to this plug
        MPlugArray plug_array;
        shader_plug.connectedTo(plug_array, true, false, &status);
        // Bug fix: guard against a shading group with no connected material;
        // plug_array[0] was previously dereferenced unconditionally and
        // crashed on material-less shading groups.
        if (status != MStatus::kSuccess || plug_array.length() == 0)
            FAILURE("ScbWriter: surfaceShader plug has no connection");
        // first connection is material.
        MFnDependencyNode surface_shader(plug_array[0].node());
        strcpy_s(material.name, ScbMaterial::kNameLen,
                 surface_shader.name().asChar());
        data_.materials.push_back(material);
    }
    return MS::kSuccess;
}
// Compute the sampling points for this set by walking the line start_ -> end_
// through the mesh with a passive particle, recording a point each time a
// face is crossed.  Output lists are appended in lock-step: for every sample
// the containing cell, the crossed face, the segment id, and the distance
// along the curve from start_ are stored.
void Foam::faceOnlySet::calcSamples
(
    DynamicList<point>& samplingPts,
    dynamicLabelList& samplingCells,
    dynamicLabelList& samplingFaces,
    dynamicLabelList& samplingSegments,
    DynamicList<scalar>& samplingCurveDist
) const
{
    // distance vector between sampling points; a degenerate (zero-length)
    // specification cannot be sampled.
    if (mag(end_ - start_) < SMALL)
    {
        FatalErrorIn("faceOnlySet::calcSamples()")
            << "Incorrect sample specification :"
            << " start equals end point." << endl
            << " start:" << start_
            << " end:" << end_
            << exit(FatalError);
    }

    const vector offset = (end_ - start_);
    const vector normOffset = offset/mag(offset);
    // tol is a fraction of the full offset; smallDist is the matching
    // absolute tolerance used for "same point" comparisons below.
    const vector smallVec = tol*offset;
    const scalar smallDist = mag(smallVec);

    // Get all boundary intersections.  The query line is extended by
    // smallVec on both ends so hits exactly at start_/end_ are not missed.
    List<pointIndexHit> bHits = searchEngine().intersections
    (
        start_ - smallVec,
        end_ + smallVec
    );

    point bPoint(GREAT, GREAT, GREAT);
    label bFaceI = -1;

    if (bHits.size())
    {
        bPoint = bHits[0].hitPoint();
        bFaceI = bHits[0].index();
    }

    // Get first tracking point. Use bPoint, bFaceI if provided.
    point trackPt;
    label trackCellI = -1;
    label trackFaceI = -1;

    //Info<< "before getTrackingPoint : bPoint:" << bPoint
    //    << " bFaceI:" << bFaceI << endl;

    getTrackingPoint
    (
        offset,
        start_,
        bPoint,
        bFaceI,

        trackPt,
        trackCellI,
        trackFaceI
    );

    //Info<< "after getTrackingPoint : "
    //    << " trackPt:" << trackPt
    //    << " trackCellI:" << trackCellI
    //    << " trackFaceI:" << trackFaceI
    //    << endl;

    if (trackCellI == -1)
    {
        // Line start_ - end_ does not intersect domain at all.
        // (or is along edge)
        // Set points and cell/face labels to empty lists

        //Info<< "calcSamples : Both start_ and end_ outside domain"
        //    << endl;

        return;
    }

    if (trackFaceI == -1)
    {
        // No boundary face. Check for nearish internal face
        trackFaceI = findNearFace(trackCellI, trackPt, smallDist);
    }

    //Info<< "calcSamples : got first point to track from :"
    //    << " trackPt:" << trackPt
    //    << " trackCell:" << trackCellI
    //    << " trackFace:" << trackFaceI
    //    << endl;

    //
    // Track until hit end of all boundary intersections
    //

    // current segment number
    label segmentI = 0;

    // starting index of current segment in samplePts
    label startSegmentI = 0;

    // index in bHits; current boundary intersection.  Starts at 1 because
    // bHits[0] was already consumed to seed the first tracking point.
    label bHitI = 1;

    while(true)
    {
        if (trackFaceI != -1)
        {
            //Info<< "trackPt:" << trackPt << " on face so use." << endl;
            samplingPts.append(trackPt);
            samplingCells.append(trackCellI);
            samplingFaces.append(trackFaceI);
            samplingCurveDist.append(mag(trackPt - start_));
        }

        // Initialize tracking starting from trackPt
        Cloud<passiveParticle> particles(mesh(), IDLList<passiveParticle>());

        passiveParticle singleParticle
        (
            particles,
            trackPt,
            trackCellI
        );

        // Track through the interior; appends samples at every crossed face.
        // Returns false when the end of the sample line was reached.
        bool reachedBoundary = trackToBoundary
        (
            singleParticle,
            samplingPts,
            samplingCells,
            samplingFaces,
            samplingCurveDist
        );

        // fill sampleSegments: tag every sample appended during this pass
        // (everything from startSegmentI onward) with the segment number.
        for(label i = samplingPts.size() - 1; i >= startSegmentI; --i)
        {
            samplingSegments.append(segmentI);
        }

        if (!reachedBoundary)
        {
            //Info<< "calcSamples : Reached end of samples: "
            //    << " samplePt now:" << singleParticle.position()
            //    << endl;
            break;
        }

        // Go past boundary intersection where tracking stopped
        // Use coordinate comparison instead of face comparison for
        // accuracy reasons
        bool foundValidB = false;

        while (bHitI < bHits.size())
        {
            // Signed distance of this boundary hit ahead of the particle,
            // measured along the (normalised) sampling direction.
            scalar dist =
                (bHits[bHitI].hitPoint() - singleParticle.position())
              & normOffset;

            //Info<< "Finding next boundary : "
            //    << "bPoint:" << bHits[bHitI].hitPoint()
            //    << " tracking:" << singleParticle.position()
            //    << " dist:" << dist
            //    << endl;

            if (dist > smallDist)
            {
                // hitpoint is past tracking position
                foundValidB = true;
                break;
            }
            else
            {
                bHitI++;
            }
        }

        if (!foundValidB)
        {
            // No valid boundary intersection found beyond tracking position
            break;
        }

        // Update starting point for tracking: re-enter the mesh just inside
        // the boundary face that was hit and start a new segment there.
        trackFaceI = bHits[bHitI].index();
        trackPt = pushIn(bHits[bHitI].hitPoint(), trackFaceI);
        trackCellI = getBoundaryCell(trackFaceI);

        segmentI++;

        startSegmentI = samplingPts.size();
    }
}
// Driver for one explicit-dynamics benchmark run on an ex x ey x ez box mesh:
// builds host-side fields, mirrors the initial velocity to the device, then
// runs 'steps' central-difference time steps, accumulating per-phase wall
// clock timings into 'perf'.
// NOTE(review): uses Scalar and device_type without declaring them, so this
// presumably sits inside a template<typename Scalar, class device_type>
// header just above this chunk - confirm against the full file.
void explicit_dynamics_app( const size_t ex, const size_t ey, const size_t ez,
                            const size_t steps ,
                            PerformanceData & perf )
{
  // Host-mirror and device array typedefs (several are currently unused but
  // kept to document the available array flavours).
  typedef typename KokkosArray::MDArray<Scalar,device_type>::HostMirror  scalar_array_h;
  typedef typename KokkosArray::MDArray<int,device_type>::HostMirror    int_array_h;

  typedef typename KokkosArray::MDArray<Scalar,device_type>  scalar_array_d;
  typedef typename KokkosArray::MDArray<int,device_type>     int_array_d;

  typedef typename KokkosArray::Value<Scalar,device_type>::HostMirror  scalar_h;
  typedef typename KokkosArray::Value<Scalar,device_type>              scalar_d;

  // Two-state (old/new) rotation for the time-stepped fields.
  const int NumStates = 2;

  // Fixed user-chosen time step (no automatic end-time control here).
  const Scalar user_dt = 1.0e-5;
  //const Scalar end_time = 0.0050;

  // element block parameters: bulk viscosity and hourglass control are
  // disabled for this run (physical values kept in the comments).
  const Scalar  lin_bulk_visc = 0.0;
  const Scalar quad_bulk_visc = 0.0;
  //const Scalar  lin_bulk_visc = 0.06;
  //const Scalar quad_bulk_visc = 1.2;
  const Scalar hg_stiffness = 0.0;
  const Scalar hg_viscosity = 0.0;
  //const Scalar hg_stiffness = 0.03;
  //const Scalar hg_viscosity = 0.001;

  // material properties
  const Scalar youngs_modulus=1.0e6;
  const Scalar poissons_ratio=0.0;
  const Scalar density = 8.0e-4;

  KokkosArray::Impl::Timer wall_clock ;

  BoxMeshFixture<int_array_h, scalar_array_h> mesh(ex,ey,ez);

  // Host-side field allocations; velocity/displacement carry the two states.
  // (nodal_mass_h and elem_mass_h are allocated but not otherwise touched
  // in this function.)
  scalar_array_h nodal_mass_h     =  KokkosArray::create_mdarray< scalar_array_h >(mesh.nnodes);
  scalar_array_h elem_mass_h      =  KokkosArray::create_mdarray< scalar_array_h >(mesh.nelems);

  scalar_array_h acceleration_h   =  KokkosArray::create_mdarray< scalar_array_h >(mesh.nnodes, 3);
  scalar_array_h velocity_h       =  KokkosArray::create_mdarray< scalar_array_h >(mesh.nnodes, 3, 2); // two state field
  scalar_array_h displacement_h   =  KokkosArray::create_mdarray< scalar_array_h >(mesh.nnodes, 3, 2); // two state field
  scalar_array_h internal_force_h =  KokkosArray::create_mdarray< scalar_array_h >(mesh.nnodes, 3);
  scalar_array_h stress_new_h     =  KokkosArray::create_mdarray< scalar_array_h >(mesh.nelems,6);

  //setup the initial condition on velocity: nodes on the x == 0 face get an
  //initial x-velocity in both states.
  {
    const unsigned X = 0;
    for (int inode = 0; inode< mesh.nnodes; ++inode)
    {
      if ( mesh.node_coords(inode,X) == 0)
      {
        velocity_h(inode,X,0) = 1.0e3;
        velocity_h(inode,X,1) = 1.0e3;
      }
    }
  }

  // Device-side region bundles mesh, material and field storage.
  Region<Scalar,device_type>  region( NumStates,
                                      mesh,
                                      lin_bulk_visc,
                                      quad_bulk_visc,
                                      hg_stiffness,
                                      hg_viscosity,
                                      youngs_modulus,
                                      poissons_ratio,
                                      density);

  KokkosArray::deep_copy(region.velocity, velocity_h);

  perf.mesh_time = wall_clock.seconds(); // Mesh and graph allocation and population.

  wall_clock.reset();

  // Parameters required for the internal force computations.

  //--------------------------------------------------------------------------
  // We will call a sequence of functions.  These functions have been
  // grouped into several functors to balance the number of global memory
  // accesses versus requiring too many registers or too much L1 cache.
  // Global memory accees have read/write cost and memory subsystem contention cost.
  //--------------------------------------------------------------------------

  KokkosArray::parallel_for( region.num_elements,
      initialize_element<Scalar,device_type>(region)
      );

  KokkosArray::parallel_for( region.num_nodes,
      initialize_node<Scalar,device_type>(region)
      );

  perf.init_time = wall_clock.seconds(); // Initialization

  wall_clock.reset();

  // State indices rotate each step; the first iteration maps
  // previous=0, current=0, next=1.
  int current_state = 0;
  int previous_state = 0;
  int next_state = 0;

  const int total_num_steps = steps ;

  perf.number_of_steps = total_num_steps ;

  for (int step = 0; step < total_num_steps; ++step)
  {
    //rotate the states
    previous_state = current_state;
    current_state = next_state;
    ++next_state;
    next_state %= NumStates;

    wall_clock.reset();

    // First kernel 'grad_hgop' combines three functions:
    // gradient, velocity gradient, and hour glass operator.
    KokkosArray::parallel_for( region.num_elements ,
        grad_hgop<Scalar, device_type> ( region,
                                         current_state,
                                         previous_state
                                       ));

    // Combine tensor decomposition and rotation functions.
    KokkosArray::parallel_for( region.num_elements ,
        decomp_rotate<Scalar, device_type> ( region,
                                             current_state,
                                             previous_state
                                           ));

    // Single beastly function in this last functor,
    // did not notice any opportunity for splitting.
    // The reduction selects the next stable time step.
    KokkosArray::parallel_reduce( region.num_elements ,
        divergence<Scalar, device_type> ( region,
                                          user_dt,
                                          current_state,
                                          previous_state
                                        ),
        set_next_time_step<Scalar,device_type>(region));

    device_type::fence();

    perf.internal_force_time += wall_clock.seconds();
    wall_clock.reset();

    // Assembly of elements' contributions to nodal force into
    // a nodal force vector.  Update the accelerations, velocities,
    // displacements.
    // The same pattern can be used for matrix-free residual computations.
    KokkosArray::parallel_for( region.num_nodes ,
        finish_step<Scalar, device_type>( region,
                                          ex,
                                          current_state,
                                          next_state
                                        ));

    device_type::fence();

    perf.central_diff += wall_clock.seconds();

    wall_clock.reset();

#ifdef KOKKOSARRAY_DEVICE_CUDA
    // Periodic device->host copy of the result fields (timed separately so
    // the transfer cost does not pollute the kernel timings).
    if (step%100 == 0 )
    {
      KokkosArray::deep_copy(acceleration_h,region.acceleration);
      KokkosArray::deep_copy(velocity_h,region.velocity);
      KokkosArray::deep_copy(displacement_h,region.displacement);
      KokkosArray::deep_copy(internal_force_h,region.internal_force);
      KokkosArray::deep_copy(stress_new_h,region.stress_new);
    }
#endif

    device_type::fence();

    perf.copy_to_host_time += wall_clock.seconds();

    wall_clock.reset();
  }
}
Foam::cellShapeControlMesh::cellShapeControlMesh(const Time& runTime) : DistributedDelaunayMesh<CellSizeDelaunay> ( runTime, meshSubDir ), runTime_(runTime), defaultCellSize_(0.0) { if (this->vertexCount()) { fvMesh mesh ( IOobject ( meshSubDir, runTime.timeName(), runTime, IOobject::READ_IF_PRESENT, IOobject::NO_WRITE ) ); if (mesh.nPoints() == this->vertexCount()) { pointScalarField sizes ( IOobject ( "sizes", runTime.timeName(), meshSubDir, runTime, IOobject::READ_IF_PRESENT, IOobject::NO_WRITE ), pointMesh::New(mesh) ); triadIOField alignments ( IOobject ( "alignments", mesh.time().timeName(), meshSubDir, mesh.time(), IOobject::READ_IF_PRESENT, IOobject::AUTO_WRITE ) ); if ( sizes.size() == this->vertexCount() && alignments.size() == this->vertexCount() ) { for ( Finite_vertices_iterator vit = finite_vertices_begin(); vit != finite_vertices_end(); ++vit ) { vit->targetCellSize() = sizes[vit->index()]; vit->alignment() = alignments[vit->index()]; } } else { FatalErrorIn ( "Foam::cellShapeControlMesh::cellShapeControlMesh" "(const Time&)" ) << "Cell size point field is not the same size as the " << "mesh." << abort(FatalError); } } } }
// Mouse-press handler for the hair tool: on a left click, casts a ray from
// the click position into the scene, finds the first (breadth-first) mesh it
// intersects, and records the screen point and the world-space intersection
// as the start of a new spline.  Non-left clicks fall through to the base
// context handler.
MStatus HairToolContext::doPress( MEvent& event )
{
    // if we have a left mouse click
    if(event.mouseButton() == MEvent::kLeftMouse)
    {
        //Our Viewer
        m_View = M3dView::active3dView();

        //Get Screen click position (stored in m_storage[0]=x, m_storage[1]=y)
        event.getPosition( m_storage[0], m_storage[1] );
        screenPoints = vector<vec2>();  // reset the recorded screen stroke
        screenPoints.push_back(vec2(m_storage[0], m_storage[1]));

        //char buffer[200];
        //sprintf(buffer, "print \"%i, %i\\n\"", m_storage[0], m_storage[1]);
        //MGlobal::executeCommand(buffer);

        //Camera stuff: convert the 2D click into a world-space ray
        //(origin + direction).
        MPoint origin = MPoint();
        MVector direction = MVector();
        m_View.viewToWorld(m_storage[0], m_storage[1], origin, direction);

        //Iterate through meshes in scene
        bool intersection = false;
        MPointArray points = MPointArray();
        MIntArray polygonIds = MIntArray();
        MItDag dagIter = MItDag(MItDag::kBreadthFirst, MFn::kInvalid);
        for( ; !dagIter.isDone(); dagIter.next() ){
            MDagPath dagPath;
            dagIter.getPath(dagPath);
            MFnDagNode dagNode( dagPath);

            //Object cannot be intermediate, it must be a mesh
            if( dagNode.isIntermediateObject() ) continue;
            if( !dagPath.hasFn(MFn::kMesh) ) continue;
            if( dagPath.hasFn(MFn::kTransform) ) continue;
            MGlobal::executeCommand(MString("print \"node is a mesh \\n\""));

            //MFnMesh mesh = MFnMesh(dagPath);
            MFnMesh mesh(dagPath);
            points = MPointArray();
            polygonIds = MIntArray();
            // Ray-cast against this mesh; on a hit the loop exits with
            // dagIter still positioned at the hit mesh.
            intersection = mesh.intersect(origin, direction, points, 1e-010, MSpace::kWorld, &polygonIds);
            if(intersection){
                break;
            }
        }
        if(intersection){
            intersectionFound = true;

            MDagPath dagPath;
            dagIter.getPath(dagPath);
            // MFnMesh mesh = MFnMesh(dagPath);
            MFnMesh mesh(dagPath);

            //Polygon Normal of the hit face, in world space
            MVector polygonNormal;
            mesh.getPolygonNormal(polygonIds[0], polygonNormal, MSpace::kWorld);
            // NOTE(review): MVector::angle returns radians (max ~3.14), so
            // "< 20.0f" is always true - the threshold looks like it was
            // meant in degrees.  Harmless today because the body is empty,
            // but confirm before putting code inside this branch.
            if(polygonNormal.normal().angle(direction.normal()) < 20.0f){
                //polygonNormal = mesh.get
            }

            //Camera Right vector in world space
            m_View.getCamera(dagPath);
            MFnCamera camera(dagPath);
            MVector cameraRight = camera.rightDirection(MSpace::kWorld);

            //Resulting Plane
            //Point point = points[0];
            //Normal normal = cameraRight^polygonNormal;

            //pushback point: start the spline at the first intersection
            splinePoints = vector<MPoint>();
            splinePoints.push_back(MPoint(points[0].x, points[0].y, points[0].z, points[0].w));

            /*//Calculate Tvalue
            tValue = (points[0].x - origin.x)/direction.x;*/
        }
        else{
            intersectionFound = false;
            MGlobal::executeCommand("print \" No Intersection \\n\"");
        }

        // yay!
        return MS::kSuccess;
    }

    // just let the base class handle the event*/
    return MPxContext::doPress(event);
}
// Transient heat-transfer example driven by PARALUTION's AMG solver:
// reads "domain.mesh", refines it, and time-steps an implicit (R-K order 1)
// scheme from TEMP_INIT until T_FINAL with constant Jacobian.
// The whole body is compiled only when WITH_PARALUTION is defined.
int main(int argc, char* argv[])
{
#ifdef WITH_PARALUTION
  HermesCommonApi.set_integral_param_value(matrixSolverType, SOLVER_PARALUTION_AMG);

  // Load the mesh.
  MeshSharedPtr mesh(new Mesh);
  MeshReaderH2D mloader;
  mloader.load("domain.mesh", mesh);

  // Perform initial mesh refinements.
  for(int i = 0; i < INIT_REF_NUM; i++)
    mesh->refine_all_elements();
  mesh->refine_towards_boundary("Boundary air", INIT_REF_NUM_BDY);
  mesh->refine_towards_boundary("Boundary ground", INIT_REF_NUM_BDY);

  // Previous time level solution (initialized by the external temperature).
  MeshFunctionSharedPtr<double> tsln(new ConstantSolution<double> (mesh, TEMP_INIT));

  // Initialize the weak formulation.
  double current_time = 0;
  // BUGFIX: this argument had been corrupted by an encoding error -
  // "&current_time" had collapsed into "¤t_time" ("&curren" read as the
  // currency-sign entity).  The weak form takes the address of the time
  // variable so it can follow the simulation time as it advances below.
  CustomWeakFormHeatRK1 wf("Boundary air", ALPHA, LAMBDA, HEATCAP, RHO, time_step,
                           &current_time, TEMP_INIT, T_FINAL, tsln);

  // Initialize boundary conditions.
  DefaultEssentialBCConst<double> bc_essential("Boundary ground", TEMP_INIT);
  EssentialBCs<double> bcs(&bc_essential);

  // Create an H1 space with default shapeset.
  SpaceSharedPtr<double> space(new H1Space<double>(mesh, &bcs, P_INIT));
  int ndof = space->get_num_dofs();
  Hermes::Mixins::Loggable::Static::info("ndof = %d", ndof);

  // Initialize Newton solver.
  NewtonSolver<double> newton(&wf, space);
#ifdef SHOW_OUTPUT
  newton.set_verbose_output(true);
#else
  newton.set_verbose_output(false);
#endif
  // The Jacobian does not change between steps, so factor it once and
  // configure the AMG smoother / loop-solver tolerance.
  newton.set_jacobian_constant();
  newton.get_linear_matrix_solver()->as_AMGSolver()->set_smoother(Solvers::GMRES, Preconditioners::ILU);
  newton.get_linear_matrix_solver()->as_LoopSolver()->set_tolerance(1e-1, RelativeTolerance);

#ifdef SHOW_OUTPUT
  // Initialize views.
  ScalarView Tview("Temperature", new WinGeom(0, 0, 450, 600));
  Tview.set_min_max_range(0,20);
  Tview.fix_scale_width(30);
#endif

  // Time stepping:
  int ts = 1;
  do
  {
    Hermes::Mixins::Loggable::Static::info("---- Time step %d, time %3.5f s", ts, current_time);
    newton.solve();

    // Translate the resulting coefficient vector into the Solution sln.
    Solution<double>::vector_to_solution(newton.get_sln_vector(), space, tsln);

#ifdef SHOW_OUTPUT
    // Visualize the solution.
    char title[100];
    sprintf(title, "Time %3.2f s", current_time);
    Tview.set_title(title);
    Tview.show(tsln);
#endif

    // Increase current time and time step counter.
    current_time += time_step;
    ts++;
  }
  while (current_time < T_FINAL);

  // Wait for the view to be closed.
#ifdef SHOW_OUTPUT
  View::wait();
#endif
  return 0;
#endif
  return 0;
}
void GameObject::loadMesh( const std::string& _meshName ) { std::cout << "loading mesh\n"; // load the obj ngl::Obj mesh( _meshName ); std::cout << "checking triangular\n"; // this is only going to work for tri meshes so check if( ! mesh.isTriangular() ) { std::cout << "only works for tri meshes\n"; exit( EXIT_FAILURE ); } std::cout << "getting lists\n"; // get the obj data so we can process it locally std::vector <ngl::Vector> verts = mesh.getVertexList(); std::vector <ngl::Face> faces = mesh.getFaceList(); std::vector <ngl::Vector> tex = mesh.getTextureCordList(); std::vector <ngl::Vector> normals = mesh.getNormalList(); std::cout << "got mesh data\n"; // now we are going to process and pack the mesh into an ngl::VertexArrayObject std::vector <vertData> vboMesh; vertData d; unsigned int nFaces = faces.size(); unsigned int nNorm = normals.size(); unsigned int nTex = tex.size(); // loop for each of the faces for( unsigned int i = 0; i < nFaces; ++i ) { // now for each triangle in the face (remember we ensured tri above) for( int j = 0; j < 3; ++j ) { // pack in the vertex data first d.x = verts[faces[i].m_vert[j]].m_x; d.y = verts[faces[i].m_vert[j]].m_y; d.z = verts[faces[i].m_vert[j]].m_z; // now if we have norms of tex (possibly could not) pack them as well if( nNorm > 0 && nTex > 0 ) { d.nx = normals[faces[i].m_norm[j]].m_x; d.ny = normals[faces[i].m_norm[j]].m_y; d.nz = normals[faces[i].m_norm[j]].m_z; d.u = tex[faces[i].m_tex[j]].m_x; d.v = tex[faces[i].m_tex[j]].m_y; } // now if neither are present (only verts like Zbrush models) else if( nNorm == 0 && nTex == 0 ) { d.nx = 0; d.ny = 0; d.nz = 0; d.u = 0; d.v = 0; } // here we've got norms but not tex else if( nNorm > 0 && nTex == 0 ) { d.nx = normals[faces[i].m_norm[j]].m_x; d.ny = normals[faces[i].m_norm[j]].m_y; d.nz = normals[faces[i].m_norm[j]].m_z; d.u = 0; d.v = 0; } // here we've got tex but not norm least common else if( nNorm == 0 && nTex > 0 ) { d.nx = 0; d.ny = 0; d.nz = 0; d.u = 
tex[faces[i].m_tex[j]].m_x; d.v = tex[faces[i].m_tex[j]].m_y; } // now we calculate the tangent / bi-normal (tangent) based on the article here // http://www.terathon.com/code/tangent.html ngl::Vector c1 = normals[faces[i].m_norm[j]].cross( ngl::Vector( 0.0, 0.0, 1.0 ) ); ngl::Vector c2 = normals[faces[i].m_norm[j]].cross( ngl::Vector( 0.0, 1.0, 0.0 ) ); ngl::Vector tangent; ngl::Vector binormal; if( c1.length() > c2.length() ) { tangent = c1; } else { tangent = c2; } // now we normalize the tangent so we don't need to do it in the shader tangent.normalize(); // now we calculate the binormal using the model normal and tangent (cross) binormal = normals[faces[i].m_norm[j]].cross( tangent ); // normalize again so we don't need to in the shader binormal.normalize(); d.tx = tangent.m_x; d.ty = tangent.m_y; d.tz = tangent.m_z; d.bx = binormal.m_x; d.by = binormal.m_y; d.bz = binormal.m_z; // finally add it to our mesh VAO structure vboMesh.push_back( d ); } } // first we grab an instance of our VOA class as a TRIANGLE_STRIP vao = ngl::VertexArrayObject::createVOA( GL_TRIANGLES ); // next we bind it so it's active for setting data vao->bind(); unsigned int meshSize = vboMesh.size(); // now we have our data add it to the VAO, we need to tell the VAO the following // how much (in bytes) data we are copying // a pointer to the first element of data (in this case the address of the first element of the // std::vector vao->setData( meshSize * sizeof( vertData ), vboMesh[0].u ); // in this case we have packed our data in interleaved format as follows // u,v,nx,ny,nz,x,y,z // If you look at the shader we have the following attributes being used // attribute vec3 inVert; attribute 0 // attribute vec2 inUV; attribute 1 // attribute vec3 inNormal; attribure 2 // so we need to set the vertexAttributePointer so the correct size and type as follows // vertex is attribute 0 with x,y,z(3) parts of type GL_FLOAT, our complete packed data is // sizeof(vertData) and the offset into the 
data structure for the first x component is 5 (u,v,nx,ny,nz)..x vao->setVertexAttributePointer( 0, 3, GL_FLOAT, sizeof( vertData ), 5 ); // uv same as above but starts at 0 and is attrib 1 and only u,v so 2 vao->setVertexAttributePointer( 1, 2, GL_FLOAT, sizeof( vertData ), 0 ); // normal same as vertex only starts at position 2 (u,v)-> nx vao->setVertexAttributePointer( 2, 3, GL_FLOAT, sizeof( vertData ), 2 ); // tangent same as vertex only starts at position 8 (u,v)-> nx vao->setVertexAttributePointer( 3, 3, GL_FLOAT, sizeof( vertData ), 8 ); // bi-tangent (or Binormal) same as vertex only starts at position 11 (u,v)-> nx vao->setVertexAttributePointer( 4, 3, GL_FLOAT, sizeof( vertData ), 11 ); // now we have set the vertex attributes we tell the VAO class how many indices to draw when // glDrawArrays is called, in this case we use buffSize (but if we wished less of the sphere to be drawn we could // specify less (in steps of 3)) vao->setNumIndices( meshSize ); // finally we have finished for now so time to unbind the VAO vao->unbind(); }
Mesh createGrid(const RVector & x, const RVector & y, const RVector & z){ Mesh mesh(3); mesh.createGrid(x, y, z); return mesh; }
// Driver for the low-fidelity (psiLF) convection-diffusion run: solves the
// primary and auxiliary steady systems on one mesh, transfers both solutions
// into the combined "Mprime" system on a second (identical) mesh, prints
// cross-check norms, and writes the result to Exodus/XDA files.
int main(int argc, char** argv){

  //initialize libMesh
  LibMeshInit init(argc, argv);

  //parameters read from the input file (with defaults)
  GetPot infile("fem_system_params.in");
  const Real global_tolerance          = infile("global_tolerance", 0.);
  const unsigned int nelem_target      = infile("n_elements", 400);
  const bool transient                 = infile("transient", true);  // currently unused
  const Real deltat                    = infile("deltat", 0.005);
  unsigned int n_timesteps             = infile("n_timesteps", 1);
  //const unsigned int coarsegridsize    = infile("coarsegridsize", 1);
  const unsigned int coarserefinements = infile("coarserefinements", 0);
  const unsigned int max_adaptivesteps = infile("max_adaptivesteps", 10);
  //const unsigned int dim               = 2;

#ifdef LIBMESH_HAVE_EXODUS_API
  const unsigned int write_interval    = infile("write_interval", 5);
#endif

  // Create a mesh, with dimension to be overridden later, distributed
  // across the default MPI communicator.
  // Two copies of the same mesh: 'mesh' hosts the primary/aux systems,
  // 'mesh2' hosts the combined Mprime system.
  Mesh mesh(init.comm());
  Mesh mesh2(init.comm());
  GetPot infileForMesh("convdiff_mprime.in");
  std::string find_mesh_here = infileForMesh("divided_mesh","meep.exo");
  mesh.read(find_mesh_here);
  mesh2.read(find_mesh_here);
  //mesh.read("psiHF_mesh_1Dfused.xda");

  // And an object to refine it (adaptive refinement currently disabled)
  /*MeshRefinement mesh_refinement(mesh);
  mesh_refinement.coarsen_by_parents() = true;
  mesh_refinement.absolute_global_tolerance() = global_tolerance;
  mesh_refinement.nelem_target() = nelem_target;
  mesh_refinement.refine_fraction() = 0.3;
  mesh_refinement.coarsen_fraction() = 0.3;
  mesh_refinement.coarsen_threshold() = 0.1;
  mesh_refinement.uniformly_refine(coarserefinements);*/

  // Print information about the mesh to the screen.
  mesh.print_info();

  // Create an equation systems object.
  EquationSystems equation_systems (mesh);
  EquationSystems equation_systems_mix(mesh2);

  //name system
  ConvDiff_PrimarySys & system_primary =
    equation_systems.add_system<ConvDiff_PrimarySys>("ConvDiff_PrimarySys");
  ConvDiff_AuxSys & system_aux =
    equation_systems.add_system<ConvDiff_AuxSys>("ConvDiff_AuxSys");
  ConvDiff_MprimeSys & system_mix =
    equation_systems_mix.add_system<ConvDiff_MprimeSys>("ConvDiff_MprimeSys");

  //steady-state problem: all three systems use a SteadySolver, so exactly
  //one "time step" is expected.
  system_primary.time_solver = AutoPtr<TimeSolver>(new SteadySolver(system_primary));
  system_aux.time_solver = AutoPtr<TimeSolver>(new SteadySolver(system_aux));
  system_mix.time_solver = AutoPtr<TimeSolver>(new SteadySolver(system_mix));
  libmesh_assert_equal_to (n_timesteps, 1);

  /*equation_systems.read("psiLF.xda", READ,
                        EquationSystems::READ_HEADER |
                        EquationSystems::READ_DATA |
                        EquationSystems::READ_ADDITIONAL_DATA);
  equation_systems.print_info();*/

  // Initialize the system
  equation_systems.init ();

  // Set the time stepping options
  system_primary.deltat = deltat;
  system_aux.deltat = deltat;//this is ignored for SteadySolver...right?

  // And the nonlinear solver options.  The raw pointers are handed to
  // AutoPtr immediately, which takes ownership.
  NewtonSolver *solver_primary = new NewtonSolver(system_primary);
  system_primary.time_solver->diff_solver() = AutoPtr<DiffSolver>(solver_primary);
  solver_primary->quiet = infile("solver_quiet", true);
  solver_primary->verbose = !solver_primary->quiet;
  solver_primary->max_nonlinear_iterations = infile("max_nonlinear_iterations", 15);
  solver_primary->relative_step_tolerance = infile("relative_step_tolerance", 1.e-3);
  solver_primary->relative_residual_tolerance = infile("relative_residual_tolerance", 0.0);
  solver_primary->absolute_residual_tolerance = infile("absolute_residual_tolerance", 0.0);

  NewtonSolver *solver_aux = new NewtonSolver(system_aux);
  system_aux.time_solver->diff_solver() = AutoPtr<DiffSolver>(solver_aux);
  solver_aux->quiet = infile("solver_quiet", true);
  solver_aux->verbose = !solver_aux->quiet;
  solver_aux->max_nonlinear_iterations = infile("max_nonlinear_iterations", 15);
  solver_aux->relative_step_tolerance = infile("relative_step_tolerance", 1.e-3);
  solver_aux->relative_residual_tolerance = infile("relative_residual_tolerance", 0.0);
  solver_aux->absolute_residual_tolerance = infile("absolute_residual_tolerance", 0.0);

  // And the linear solver options
  solver_primary->max_linear_iterations = infile("max_linear_iterations", 50000);
  solver_primary->initial_linear_tolerance = infile("initial_linear_tolerance", 1.e-3);
  solver_aux->max_linear_iterations = infile("max_linear_iterations", 50000);
  solver_aux->initial_linear_tolerance = infile("initial_linear_tolerance", 1.e-3);

  // Print information about the system to the screen.
  equation_systems.print_info();

  // Now we begin the timestep loop to compute the time-accurate
  // solution of the equations...not that this is transient, but eh, why not...
  for (unsigned int t_step=0; t_step != n_timesteps; ++t_step){
    // A pretty update message
    std::cout << "\n\nSolving time step " << t_step << ", time = "
              << system_primary.time << std::endl;

    // Adaptively solve the timestep (adaptive loop currently commented out,
    // so a_step stays 0)
    unsigned int a_step = 0;
    /*for (; a_step != max_adaptivesteps; ++a_step)
    {
      system.solve();
      system.postprocess();
      ErrorVector error;
      AutoPtr<ErrorEstimator> error_estimator;

      // To solve to a tolerance in this problem we
      // need a better estimator than Kelly
      if (global_tolerance != 0.)
      {
        // We can't adapt to both a tolerance and a mesh
        // size at once
        libmesh_assert_equal_to (nelem_target, 0);
        UniformRefinementEstimator *u = new UniformRefinementEstimator;
        // The lid-driven cavity problem isn't in H1, so
        // lets estimate L2 error
        u->error_norm = L2;
        error_estimator.reset(u);
      }
      else
      {
        // If we aren't adapting to a tolerance we need a
        // target mesh size
        libmesh_assert_greater (nelem_target, 0);
        // Kelly is a lousy estimator to use for a problem
        // not in H1 - if we were doing more than a few
        // timesteps we'd need to turn off or limit the
        // maximum level of our adaptivity eventually
        error_estimator.reset(new KellyErrorEstimator);
      }

      // Calculate error
      std::vector<Real> weights(9,1.0);  // based on u, v, p, c, their adjoints, and source parameter
      // Keep the same default norm type.
      std::vector<FEMNormType> norms(1, error_estimator->error_norm.type(0));
      error_estimator->error_norm = SystemNorm(norms, weights);
      error_estimator->estimate_error(system, error);

      // Print out status at each adaptive step.
      Real global_error = error.l2_norm();
      std::cout << "Adaptive step " << a_step << ": " << std::endl;
      if (global_tolerance != 0.)
        std::cout << "Global_error = " << global_error << std::endl;
      if (global_tolerance != 0.)
        std::cout << "Worst element error = " << error.maximum()
                  << ", mean = " << error.mean() << std::endl;

      if (global_tolerance != 0.)
      {
        // If we've reached our desired tolerance, we
        // don't need any more adaptive steps
        if (global_error < global_tolerance)
          break;
        mesh_refinement.flag_elements_by_error_tolerance(error);
      }
      else
      {
        // If flag_elements_by_nelem_target returns true, this
        // should be our last adaptive step.
        if (mesh_refinement.flag_elements_by_nelem_target(error))
        {
          mesh_refinement.refine_and_coarsen_elements();
          equation_systems.reinit();
          a_step = max_adaptivesteps;
          break;
        }
      }

      // Carry out the adaptive mesh refinement/coarsening
      mesh_refinement.refine_and_coarsen_elements();
      equation_systems.reinit();

      std::cout << "Refined mesh to "
                << mesh.n_active_elem()
                << " active elements and "
                << equation_systems.n_active_dofs()
                << " active dofs." << std::endl;
    }*/

    // Do one last solve if necessary
    // NOTE(review): with the adaptive loop commented out a_step is always 0,
    // so this block only runs when the input sets max_adaptivesteps = 0
    // (the in-code default is 10) - confirm that is the intended workflow.
    if (a_step == max_adaptivesteps)
    {
      // Solve the two systems and report their residual norms.
      system_primary.solve();
      std::cout << "\n\n Residual L2 norm (primary): "
                << system_primary.calculate_norm(*system_primary.rhs, L2) << "\n";
      system_aux.solve();
      std::cout << "\n\n Residual L2 norm (auxiliary): "
                << system_aux.calculate_norm(*system_aux.rhs, L2) << "\n";

      // Copy the six solved variables into the combined Mprime system,
      // matching variables by name.
      equation_systems_mix.init();
      DirectSolutionTransfer sol_transfer(init.comm());
      sol_transfer.transfer(system_primary.variable(system_primary.variable_number("c")),
                            system_mix.variable(system_mix.variable_number("c")));
      sol_transfer.transfer(system_primary.variable(system_primary.variable_number("zc")),
                            system_mix.variable(system_mix.variable_number("zc")));
      sol_transfer.transfer(system_primary.variable(system_primary.variable_number("fc")),
                            system_mix.variable(system_mix.variable_number("fc")));
      sol_transfer.transfer(system_aux.variable(system_aux.variable_number("aux_c")),
                            system_mix.variable(system_mix.variable_number("aux_c")));
      sol_transfer.transfer(system_aux.variable(system_aux.variable_number("aux_zc")),
                            system_mix.variable(system_mix.variable_number("aux_zc")));
      sol_transfer.transfer(system_aux.variable(system_aux.variable_number("aux_fc")),
                            system_mix.variable(system_mix.variable_number("aux_fc")));

      // Cross-check: per-variable norms of the transferred solution against
      // the source systems (should agree pairwise).
      std::cout << "c: "
                << system_mix.calculate_norm(*system_mix.solution, 0, L2) << " "
                << system_primary.calculate_norm(*system_primary.solution, 0, L2) << std::endl;
      std::cout << "zc: "
                << system_mix.calculate_norm(*system_mix.solution, 1, L2) << " "
                << system_primary.calculate_norm(*system_primary.solution, 1, L2) << std::endl;
      std::cout << "fc: "
                << system_mix.calculate_norm(*system_mix.solution, 2, L2) << " "
                << system_primary.calculate_norm(*system_primary.solution, 2, L2) << std::endl;
      std::cout << "aux_c: "
                << system_mix.calculate_norm(*system_mix.solution, 3, L2) << " "
                << system_aux.calculate_norm(*system_aux.solution, 0, L2) << std::endl;
      std::cout << "aux_zc: "
                << system_mix.calculate_norm(*system_mix.solution, 4, L2) << " "
                << system_aux.calculate_norm(*system_aux.solution, 1, L2) << std::endl;
      std::cout << "aux_fc: "
                << system_mix.calculate_norm(*system_mix.solution, 5, L2) << " "
                << system_aux.calculate_norm(*system_aux.solution, 2, L2) << std::endl;
      std::cout << "Overall: "
                << system_mix.calculate_norm(*system_mix.solution, L2) << std::endl;

      system_mix.postprocess();

      //DEBUG
      std::cout << " M_HF(psiLF): " << std::setprecision(17)
                << system_mix.get_MHF_psiLF() << "\n";
      std::cout << " I(psiLF): " << std::setprecision(17)
                << system_mix.get_MLF_psiLF() << "\n";
    }

    // Advance to the next timestep in a transient problem
    system_primary.time_solver->advance_timestep();

#ifdef LIBMESH_HAVE_EXODUS_API
    // Write out this timestep if we're requested to
    if ((t_step+1)%write_interval == 0)
    {
      //std::ostringstream file_name;

      // We write the file in the ExodusII format.
      //file_name << "out_"
      //          << std::setw(3)
      //          << std::setfill('0')
      //          << std::right
      //          << t_step+1
      //          << ".e";

      //ExodusII_IO(mesh).write_timestep(file_name.str(),
      ExodusII_IO(mesh).write_timestep("psiLF.exo",
                                       equation_systems_mix,
                                       1, /* This number indicates how many time steps
                                             are being written to the file */
                                       system_primary.time);
      mesh.write("psiLF_mesh.xda");
      equation_systems_mix.write("psiLF.xda", WRITE,
                                 EquationSystems::WRITE_DATA |
                                 EquationSystems::WRITE_ADDITIONAL_DATA);
    }
#endif // #ifdef LIBMESH_HAVE_EXODUS_API
  }

  // All done.
  return 0;

} //end main
void TTextureMesh::saveData(TOStream &os) { struct locals { static inline bool hasNon1Rigidity(const TTextureMesh &mesh) { int v, vCount = int(mesh.verticesCount()); for (v = 0; v != vCount; ++v) if (mesh.vertex(v).P().rigidity != 1.0) return true; return false; } }; // NOTE: Primitives saved by INDEX iteration is NOT COINCIDENTAL - since // the lists' internal linking could have been altered to mismatch the // natural indexing referred to by primitives' data. if (m_vertices.size() != m_vertices.nodesCount() || m_edges.size() != m_edges.nodesCount() || m_faces.size() != m_faces.nodesCount()) { // Ensure the mesh is already squeezed - save a squeezed // copy if necessary TTextureMesh mesh(*this); mesh.squeeze(); mesh.saveData(os); return; } assert(m_vertices.size() == m_vertices.nodesCount()); assert(m_edges.size() == m_edges.nodesCount()); assert(m_faces.size() == m_faces.nodesCount()); // Store Vertices os.openChild("V"); { int vCount = int(m_vertices.size()); os << vCount; for (int v = 0; v != vCount; ++v) { TTextureMesh::vertex_type &vx = m_vertices[v]; os << vx.P().x << vx.P().y; } } os.closeChild(); // Store Edges os.openChild("E"); { int eCount = int(m_edges.size()); os << eCount; for (int e = 0; e != eCount; ++e) { TTextureMesh::edge_type &ed = m_edges[e]; os << ed.vertex(0) << ed.vertex(1); } } os.closeChild(); // Store Faces os.openChild("F"); { int fCount = int(m_faces.size()); os << fCount; for (int f = 0; f != fCount; ++f) { TTextureMesh::face_type &fc = m_faces[f]; int e, eCount = fc.edgesCount(); for (e = 0; e < eCount; ++e) os << fc.edge(e); } } os.closeChild(); // Store rigidities if (locals::hasNon1Rigidity(*this)) { os.openChild("rigidities"); { int vCount = int(m_vertices.size()); os << vCount; for (int v = 0; v != vCount; ++v) os << m_vertices[v].P().rigidity; } os.closeChild(); } }
void Foam::distanceSurface::createGeometry() { if (debug) { Pout<< "distanceSurface::createGeometry :updating geometry." << endl; } // Clear any stored topologies facesPtr_.clear(); isoSurfCellPtr_.clear(); isoSurfPtr_.clear(); // Clear derived data clearGeom(); const fvMesh& fvm = static_cast<const fvMesh&>(mesh()); // Distance to cell centres // ~~~~~~~~~~~~~~~~~~~~~~~~ cellDistancePtr_.reset ( new volScalarField ( IOobject ( "cellDistance", fvm.time().timeName(), fvm.time(), IOobject::NO_READ, IOobject::NO_WRITE, false ), fvm, dimensionedScalar("zero", dimLength, 0) ) ); volScalarField& cellDistance = cellDistancePtr_(); // Internal field { const pointField& cc = fvm.C(); scalarField& fld = cellDistance.primitiveFieldRef(); List<pointIndexHit> nearest; surfPtr_().findNearest ( cc, scalarField(cc.size(), GREAT), nearest ); if (signed_) { List<volumeType> volType; surfPtr_().getVolumeType(cc, volType); forAll(volType, i) { volumeType vT = volType[i]; if (vT == volumeType::OUTSIDE) { fld[i] = Foam::mag(cc[i] - nearest[i].hitPoint()); } else if (vT == volumeType::INSIDE) { fld[i] = -Foam::mag(cc[i] - nearest[i].hitPoint()); } else { FatalErrorInFunction << "getVolumeType failure, neither INSIDE or OUTSIDE" << exit(FatalError); } } } else {