int main (int argc, char* argv[])
{
    BoxLib::Initialize(argc,argv);

    // What time is it now?  We'll use this to compute total run time.
    Real strt_time = ParallelDescriptor::second();

    std::cout << std::setprecision(15);

    // ParmParse is a way of reading inputs from the inputs file
    ParmParse pp;

    int verbose = 0;
    pp.query("verbose", verbose);

    // We need to get n_cell from the inputs file - this is the number of cells
    // on each side of a square (or cubic) domain.
    int n_cell;
    pp.get("n_cell",n_cell);

    // The domain is broken into boxes of size max_grid_size
    int max_grid_size;
    pp.get("max_grid_size",max_grid_size);

    // Default plot_int to 1, allow us to set it to something else in the inputs file.
    // If plot_int < 0 then no plot files will be written.
    int plot_int = 1;
    pp.query("plot_int",plot_int);

    // Default nsteps to 0, allow us to set it to something else in the inputs file
    int nsteps = 0;
    pp.query("nsteps",nsteps);

    // do_tiling is a file-scope global defined elsewhere in this tutorial
    pp.query("do_tiling", do_tiling);

    // Define a single box covering the domain
    IntVect dom_lo(0,0,0);
    IntVect dom_hi(n_cell-1,n_cell-1,n_cell-1);
    Box domain(dom_lo,dom_hi);

    // Initialize the boxarray "bs" from the single box "domain"
    BoxArray bs(domain);

    // Break up boxarray "bs" into chunks no larger than "max_grid_size" along a direction
    bs.maxSize(max_grid_size);

    // This defines the physical size of the box.  Right now the box is [-1,1] in each direction.
    RealBox real_box;
    for (int n = 0; n < BL_SPACEDIM; n++) {
        real_box.setLo(n,-1.0);
        real_box.setHi(n, 1.0);
    }

    // This says we are using Cartesian coordinates
    int coord = 0;

    // This sets the boundary conditions to be doubly or triply periodic
    int is_per[BL_SPACEDIM];
    for (int i = 0; i < BL_SPACEDIM; i++) is_per[i] = 1;

    // This defines a Geometry object which is useful for writing the plotfiles
    Geometry geom(domain,&real_box,coord,is_per);

    // This defines the mesh spacing
    Real dx[BL_SPACEDIM];
    for ( int n=0; n<BL_SPACEDIM; n++ )
        dx[n] = ( geom.ProbHi(n) - geom.ProbLo(n) )/domain.length(n);

    // Nghost = number of ghost cells for each array
    int Nghost = 1;

    // Ncomp = number of components for each array
    int Ncomp = 1;
    pp.query("ncomp", Ncomp);

    // Allocate space for old_phi and new_phi -- we define old_phi and new_phi as
    // pointers into a managed PArray so the MultiFabs are freed automatically.
    PArray<MultiFab> phis(2, PArrayManage);
    phis.set(0, new MultiFab(bs, Ncomp, Nghost));
    phis.set(1, new MultiFab(bs, Ncomp, Nghost));
    MultiFab* old_phi = &phis[0];
    MultiFab* new_phi = &phis[1];

    // Initialize both to zero (just because)
    old_phi->setVal(0.0);
    new_phi->setVal(0.0);

    // Initialize phi by calling a Fortran routine.
    // MFIter = MultiFab Iterator
#ifdef _OPENMP
#pragma omp parallel
#endif
    for ( MFIter mfi(*new_phi,true); mfi.isValid(); ++mfi )
    {
        const Box& bx = mfi.tilebox();

        init_phi(bx.loVect(), bx.hiVect(),
                 BL_TO_FORTRAN((*new_phi)[mfi]), Ncomp,
                 dx, geom.ProbLo(), geom.ProbHi());
    }

    // Call the compute_dt routine to return a time step which we will pass to advance
    Real dt = compute_dt(dx[0]);

    // Write a plotfile of the initial data if plot_int > 0
    // (plot_int was defined in the inputs file)
    if (plot_int > 0)
    {
        int n = 0;
        const std::string& pltfile = BoxLib::Concatenate("plt",n,5);
        writePlotFile(pltfile, *new_phi, geom);
    }

    Real adv_start_time = ParallelDescriptor::second();

    for (int n = 1; n <= nsteps; n++)
    {
        // Swap the pointers so we don't have to allocate and de-allocate data
        std::swap(old_phi, new_phi);

        // new_phi = old_phi + dt * (something)
        advance(old_phi, new_phi, dx, dt, geom);

        // Tell the I/O Processor to write out which step we're doing
        if (verbose && ParallelDescriptor::IOProcessor())
            std::cout << "Advanced step " << n << std::endl;

        // Write a plotfile of the current data (plot_int was defined in the inputs file)
        if (plot_int > 0 && n%plot_int == 0)
        {
            const std::string& pltfile = BoxLib::Concatenate("plt",n,5);
            writePlotFile(pltfile, *new_phi, geom);
        }
    }

    // Call the timer again and compute the maximum difference between the
    // start time and stop time over all processors
    Real advance_time = ParallelDescriptor::second() - adv_start_time;
    Real stop_time    = ParallelDescriptor::second() - strt_time;
    const int IOProc = ParallelDescriptor::IOProcessorNumber();
    ParallelDescriptor::ReduceRealMax(stop_time,IOProc);
    ParallelDescriptor::ReduceRealMax(advance_time,IOProc);
    // kernel_time and FB_time are file-scope globals, presumably accumulated
    // in advance() around the compute kernels and the FillBoundary calls.
    ParallelDescriptor::ReduceRealMax(kernel_time,IOProc);
    ParallelDescriptor::ReduceRealMax(FB_time,IOProc);

    // Tell the I/O Processor to write out the timers
    if (ParallelDescriptor::IOProcessor()) {
        std::cout << "Kernel time    = " << kernel_time  << std::endl;
        std::cout << "FB time        = " << FB_time      << std::endl;
        std::cout << "Advance time   = " << advance_time << std::endl;
        std::cout << "Total run time = " << stop_time    << std::endl;
    }

    // Say goodbye to MPI, etc...
    BoxLib::Finalize();
}
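For reference, a plausible inputs file for the listing above. The parameter names match its pp.get and pp.query calls (n_cell and max_grid_size use pp.get and are therefore required; the rest are optional), but the values themselves are only illustrative:

n_cell = 256          # cells on each side of the domain (required)
max_grid_size = 64    # largest box size after chopping (required)
nsteps = 1000         # number of time steps to take
plot_int = 100        # plotfile interval; a value < 0 disables plotfiles
do_tiling = 1         # tiling switch, read into the file-scope global
verbose = 0           # print a line per step when nonzero
ncomp = 1             # number of components in each MultiFab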
int main (int argc, char* argv[])
{
    BoxLib::Initialize(argc,argv);

    // What time is it now?  We'll use this to compute total run time.
    Real strt_time = ParallelDescriptor::second();

    std::cout << std::setprecision(15);

    // ParmParse is a way of reading inputs from the inputs file
    ParmParse pp;

    // We need to get n_cell from the inputs file - this is the number of cells
    // on each side of a square (or cubic) domain.
    int n_cell;
    pp.get("n_cell",n_cell);

    // The domain is broken into boxes of size max_grid_size
    int max_grid_size;
    pp.get("max_grid_size",max_grid_size);

    // Default plot_int to 1, allow us to set it to something else in the inputs file.
    // If plot_int < 0 then no plot files will be written.
    int plot_int = 1;
    pp.query("plot_int",plot_int);

    // Default nsteps to 0, allow us to set it to something else in the inputs file
    int nsteps = 0;
    pp.query("nsteps",nsteps);

    // Define a single box covering the domain
#if (BL_SPACEDIM == 2)
    IntVect dom_lo(0,0);
    IntVect dom_hi(n_cell-1,n_cell-1);
#else
    IntVect dom_lo(0,0,0);
    IntVect dom_hi(n_cell-1,n_cell-1,n_cell-1);
#endif
    Box domain(dom_lo,dom_hi);

    // Initialize the boxarray "bs" from the single box "domain"
    BoxArray bs(domain);

    // Break up boxarray "bs" into chunks no larger than "max_grid_size" along a direction
    bs.maxSize(max_grid_size);

    // This defines the physical size of the box.  Right now the box is [-1,1] in each direction.
    RealBox real_box;
    for (int n = 0; n < BL_SPACEDIM; n++) {
        real_box.setLo(n,-1.0);
        real_box.setHi(n, 1.0);
    }

    // This says we are using Cartesian coordinates
    int coord = 0;

    // This sets the boundary conditions to be doubly or triply periodic
    int is_per[BL_SPACEDIM];
    for (int i = 0; i < BL_SPACEDIM; i++) is_per[i] = 1;

    // This defines a Geometry object which is useful for writing the plotfiles
    Geometry geom(domain,&real_box,coord,is_per);

    // This defines the mesh spacing
    Real dx[BL_SPACEDIM];
    for ( int n=0; n<BL_SPACEDIM; n++ )
        dx[n] = ( geom.ProbHi(n) - geom.ProbLo(n) )/domain.length(n);

    // Nghost = number of ghost cells for each array
    int Nghost = 1;

    // Ncomp = number of components for each array
    int Ncomp = 1;

    // Make sure we can fill the ghost cells from the adjacent grid
    if (Nghost > max_grid_size)
        std::cout << "NGHOST > MAX_GRID_SIZE -- grids are too small! " << std::endl;

    // Allocate space for the old_phi and new_phi -- we define old_phi and new_phi as
    // pointers to the MultiFabs
    MultiFab* old_phi = new MultiFab(bs, Ncomp, Nghost);
    MultiFab* new_phi = new MultiFab(bs, Ncomp, Nghost);

    // Initialize both to zero (just because)
    old_phi->setVal(0.0);
    new_phi->setVal(0.0);

    // Initialize new_phi by calling a Fortran routine.
    // MFIter = MultiFab Iterator
    for ( MFIter mfi(*new_phi); mfi.isValid(); ++mfi )
    {
        const Box& bx = mfi.validbox();

        FORT_INIT_PHI((*new_phi)[mfi].dataPtr(),
                      bx.loVect(), bx.hiVect(), &Nghost,
                      dx, geom.ProbLo(), geom.ProbHi());
    }

    // Call the compute_dt routine to return a time step which we will pass to advance
    Real dt = compute_dt(dx[0]);

    // Write a plotfile of the initial data if plot_int > 0
    // (plot_int was defined in the inputs file)
    if (plot_int > 0)
    {
        int n = 0;
        const std::string& pltfile = BoxLib::Concatenate("plt",n,5);
        writePlotFile(pltfile, *new_phi, geom);
    }

    // Build the flux multifabs
    MultiFab* flux = new MultiFab[BL_SPACEDIM];
    for (int dir = 0; dir < BL_SPACEDIM; dir++)
    {
        // flux(dir) has one component, zero ghost cells, and is nodal in direction dir
        BoxArray edge_grids(bs);
        edge_grids.surroundingNodes(dir);
        flux[dir].define(edge_grids,1,0,Fab_allocate);
    }

    for (int n = 1; n <= nsteps; n++)
    {
        // Swap the pointers so we don't have to allocate and de-allocate data
        std::swap(old_phi, new_phi);

        // new_phi = old_phi + dt * (something)
        advance(old_phi, new_phi, flux, dx, dt, geom);

        // Tell the I/O Processor to write out which step we're doing
        if (ParallelDescriptor::IOProcessor())
            std::cout << "Advanced step " << n << std::endl;

        // Write a plotfile of the current data (plot_int was defined in the inputs file)
        if (plot_int > 0 && n%plot_int == 0)
        {
            const std::string& pltfile = BoxLib::Concatenate("plt",n,5);
            writePlotFile(pltfile, *new_phi, geom);
        }
    }

    // Call the timer again and compute the maximum difference between the
    // start time and stop time over all processors
    Real stop_time = ParallelDescriptor::second() - strt_time;
    const int IOProc = ParallelDescriptor::IOProcessorNumber();
    ParallelDescriptor::ReduceRealMax(stop_time,IOProc);

    // Tell the I/O Processor to write out the "run time"
    if (ParallelDescriptor::IOProcessor())
        std::cout << "Run time = " << stop_time << std::endl;

    // Free the heap-allocated MultiFabs before shutting down
    delete old_phi;
    delete new_phi;
    delete [] flux;

    // Say goodbye to MPI, etc...
    BoxLib::Finalize();
}
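Neither heat-equation listing shows compute_dt. A minimal sketch, assuming a unit diffusion coefficient and the same 90%-of-stability-limit choice that the final listing in this section hard-codes inline, might look like:

// Hypothetical compute_dt, not the tutorial's actual routine: forward-Euler
// diffusion with unit coefficient is stable for dt <= dx^2/(2*BL_SPACEDIM),
// so return 90% of that bound (the constant used inline in the last listing).
Real compute_dt (Real dx)
{
    return 0.9*dx*dx / (2.0*BL_SPACEDIM);
}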
void main_main ()
{
    // What time is it now?  We'll use this to compute total run time.
    Real strt_time = ParallelDescriptor::second();

    std::cout << std::setprecision(15);

    int n_cell, max_grid_size, nsteps, plot_int, is_periodic[BL_SPACEDIM];

    // Boundary conditions
    Array<int> lo_bc(BL_SPACEDIM), hi_bc(BL_SPACEDIM);

    // inputs parameters
    {
        // ParmParse is a way of reading inputs from the inputs file
        ParmParse pp;

        // We need to get n_cell from the inputs file - this is the number of cells
        // on each side of a square (or cubic) domain.
        pp.get("n_cell",n_cell);

        // The domain is broken into boxes of size max_grid_size
        pp.get("max_grid_size",max_grid_size);

        // Default plot_int to 1, allow us to set it to something else in the inputs file.
        // If plot_int < 0 then no plot files will be written.
        plot_int = 1;
        pp.query("plot_int",plot_int);

        // Default nsteps to 0, allow us to set it to something else in the inputs file
        nsteps = 0;
        pp.query("nsteps",nsteps);

        // Boundary conditions - default is periodic (INT_DIR)
        for (int i = 0; i < BL_SPACEDIM; ++i) {
            lo_bc[i] = hi_bc[i] = INT_DIR;   // periodic boundaries are interior boundaries
        }
        pp.queryarr("lo_bc",lo_bc,0,BL_SPACEDIM);
        pp.queryarr("hi_bc",hi_bc,0,BL_SPACEDIM);
    }

    // make BoxArray and Geometry
    BoxArray ba;
    Geometry geom;
    {
        IntVect dom_lo(IntVect(D_DECL(0,0,0)));
        IntVect dom_hi(IntVect(D_DECL(n_cell-1, n_cell-1, n_cell-1)));
        Box domain(dom_lo, dom_hi);

        // Initialize the boxarray "ba" from the single box "domain"
        ba.define(domain);

        // Break up boxarray "ba" into chunks no larger than "max_grid_size" along a direction
        ba.maxSize(max_grid_size);

        // This defines the physical size of the box.  Right now the box is [-1,1] in each direction.
        RealBox real_box;
        for (int n = 0; n < BL_SPACEDIM; n++) {
            real_box.setLo(n,-1.0);
            real_box.setHi(n, 1.0);
        }

        // This says we are using Cartesian coordinates
        int coord = 0;

        // This sets the boundary conditions to be doubly or triply periodic.
        // (is_periodic was declared at the top of the function; redeclaring it
        // here would shadow that array, so we only assign to it.)
        for (int i = 0; i < BL_SPACEDIM; i++) {
            is_periodic[i] = 0;
            if (lo_bc[i] == 0 && hi_bc[i] == 0) {
                is_periodic[i] = 1;
            }
        }

        // This defines a Geometry object
        geom.define(domain,&real_box,coord,is_periodic);
    }

    // Boundary conditions
    PhysBCFunct physbcf;
    BCRec bcr(&lo_bc[0], &hi_bc[0]);
    physbcf.define(geom, bcr, BndryFunctBase(phifill));  // phifill is a fortran function

    // define dx[]
    const Real* dx = geom.CellSize();

    // Nghost = number of ghost cells for each array
    int Nghost = 1;

    // Ncomp = number of components for each array
    int Ncomp = 1;

    // time = starting time in the simulation
    Real time = 0.0;

    // We allocate two phi multifabs: one stores the old state, the other the new.
    // We swap the indices each time step to avoid copying new into old.
    PArray<MultiFab> phi(2, PArrayManage);
    phi.set(0, new MultiFab(ba, Ncomp, Nghost));
    phi.set(1, new MultiFab(ba, Ncomp, Nghost));

    // Initialize both to zero (just because)
    phi[0].setVal(0.0);
    phi[1].setVal(0.0);

    // Initialize phi[init_index] by calling a Fortran routine.
    // MFIter = MultiFab Iterator
    int init_index = 0;
    for ( MFIter mfi(phi[init_index]); mfi.isValid(); ++mfi )
    {
        const Box& bx = mfi.validbox();

        init_phi(phi[init_index][mfi].dataPtr(),
                 bx.loVect(), bx.hiVect(), &Nghost,
                 geom.CellSize(), geom.ProbLo(), geom.ProbHi());
    }

    // compute the time step
    Real dt = 0.9*dx[0]*dx[0] / (2.0*BL_SPACEDIM);

    // Write a plotfile of the initial data if plot_int > 0
    // (plot_int was defined in the inputs file)
    if (plot_int > 0)
    {
        int n = 0;
        const std::string& pltfile = BoxLib::Concatenate("plt",n,5);
        writePlotFile(pltfile, phi[init_index], geom, time);
    }

    // build the flux multifabs
    PArray<MultiFab> flux(BL_SPACEDIM, PArrayManage);
    for (int dir = 0; dir < BL_SPACEDIM; dir++)
    {
        // flux(dir) has one component, zero ghost cells, and is nodal in direction dir
        BoxArray edge_ba = ba;
        edge_ba.surroundingNodes(dir);
        flux.set(dir, new MultiFab(edge_ba, 1, 0));
    }

    int old_index = init_index;
    for (int n = 1; n <= nsteps; n++, old_index = 1 - old_index)
    {
        int new_index = 1 - old_index;

        // new_phi = old_phi + dt * (something)
        advance(phi[old_index], phi[new_index], flux, time, dt, geom, physbcf, bcr);
        time = time + dt;

        // Tell the I/O Processor to write out which step we're doing
        if (ParallelDescriptor::IOProcessor())
            std::cout << "Advanced step " << n << std::endl;

        // Write a plotfile of the current data (plot_int was defined in the inputs file)
        if (plot_int > 0 && n%plot_int == 0)
        {
            const std::string& pltfile = BoxLib::Concatenate("plt",n,5);
            writePlotFile(pltfile, phi[new_index], geom, time);
        }
    }

    // Call the timer again and compute the maximum difference between the
    // start time and stop time over all processors
    Real stop_time = ParallelDescriptor::second() - strt_time;
    const int IOProc = ParallelDescriptor::IOProcessorNumber();
    ParallelDescriptor::ReduceRealMax(stop_time,IOProc);

    // Tell the I/O Processor to write out the "run time"
    if (ParallelDescriptor::IOProcessor()) {
        std::cout << "Run time = " << stop_time << std::endl;
    }
}
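This version factors the work into main_main() but omits the wrapper; a minimal main() following the Initialize/Finalize pattern of the other listings would presumably be:

// Minimal wrapper, assuming the Initialize/Finalize bracketing used by the
// other listings in this section; main_main() is the routine defined above.
int main (int argc, char* argv[])
{
    BoxLib::Initialize(argc,argv);
    main_main();
    BoxLib::Finalize();
    return 0;
}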
int main (int argc, char* argv[])
{
    BoxLib::Initialize(argc,argv);

    BL_PROFILE_VAR("main()", pmain);

    std::cout << std::setprecision(15);

    // verbose, maxorder, solver_type, bc_type, the tolerances, n_cell,
    // max_grid_size, Ncomp and dx are file-scope globals in this tutorial.
    ParmParse ppmg("mg");
    ppmg.query("v", verbose);
    ppmg.query("maxorder", maxorder);

    ParmParse pp;

    {
        std::string solver_type_s;
        pp.get("solver_type",solver_type_s);
        if (solver_type_s == "BoxLib_C") {
            solver_type = BoxLib_C;
        }
        else if (solver_type_s == "BoxLib_C4") {
            solver_type = BoxLib_C4;
        }
        else if (solver_type_s == "BoxLib_F") {
#ifdef USE_F90_SOLVERS
            solver_type = BoxLib_F;
#else
            BoxLib::Error("Set USE_FORTRAN=TRUE in GNUmakefile");
#endif
        }
        else if (solver_type_s == "Hypre") {
#ifdef USEHYPRE
            solver_type = Hypre;
#else
            BoxLib::Error("Set USE_HYPRE=TRUE in GNUmakefile");
#endif
        }
        else if (solver_type_s == "All") {
            solver_type = All;
        }
        else {
            if (ParallelDescriptor::IOProcessor()) {
                std::cout << "Don't know this solver type: " << solver_type_s << std::endl;
            }
            BoxLib::Error("");
        }
    }

    {
        std::string bc_type_s;
        pp.get("bc_type",bc_type_s);
        if (bc_type_s == "Dirichlet") {
            bc_type = Dirichlet;
#ifdef USEHPGMG
            domain_boundary_condition = BC_DIRICHLET;
#endif
        }
        else if (bc_type_s == "Neumann") {
            bc_type = Neumann;
#ifdef USEHPGMG
            BoxLib::Error("HPGMG does not support Neumann boundary conditions");
#endif
        }
        else if (bc_type_s == "Periodic") {
            bc_type = Periodic;
#ifdef USEHPGMG
            domain_boundary_condition = BC_PERIODIC;
#endif
        }
        else {
            if (ParallelDescriptor::IOProcessor()) {
                std::cout << "Don't know this boundary type: " << bc_type_s << std::endl;
            }
            BoxLib::Error("");
        }
    }

    pp.query("tol_rel", tolerance_rel);
    pp.query("tol_abs", tolerance_abs);
    pp.query("maxiter", maxiter);
    pp.query("plot_rhs" , plot_rhs);
    pp.query("plot_beta", plot_beta);
    pp.query("plot_soln", plot_soln);
    pp.query("plot_asol", plot_asol);
    pp.query("plot_err", plot_err);
    pp.query("comp_norm", comp_norm);

    Real a, b;
    pp.get("a", a);
    pp.get("b", b);

    pp.get("n_cell",n_cell);
    pp.get("max_grid_size",max_grid_size);

    // Define a single box covering the domain
    IntVect dom_lo(D_DECL(0,0,0));
    IntVect dom_hi(D_DECL(n_cell-1,n_cell-1,n_cell-1));
    Box domain(dom_lo,dom_hi);

    // Initialize the boxarray "bs" from the single box "domain"
    BoxArray bs(domain);

    // Break up boxarray "bs" into chunks no larger than "max_grid_size" along a direction
    bs.maxSize(max_grid_size);

    // This defines the physical size of the box.  Right now the box is [0,1] in each direction.
    RealBox real_box;
    for (int n = 0; n < BL_SPACEDIM; n++) {
        real_box.setLo(n, 0.0);
        real_box.setHi(n, 1.0);
    }

    // This says we are using Cartesian coordinates
    int coord = 0;

    // This sets the boundary conditions to be periodic or not
    Array<int> is_per(BL_SPACEDIM,1);
    if (bc_type == Dirichlet || bc_type == Neumann) {
        if (ParallelDescriptor::IOProcessor()) {
            std::cout << "Using Dirichlet or Neumann boundary conditions." << std::endl;
        }
        for (int n = 0; n < BL_SPACEDIM; n++) is_per[n] = 0;
    }
    else {
        if (ParallelDescriptor::IOProcessor()) {
            std::cout << "Using periodic boundary conditions." << std::endl;
        }
        for (int n = 0; n < BL_SPACEDIM; n++) is_per[n] = 1;
    }

    // This defines a Geometry object which is useful for writing the plotfiles
    Geometry geom(domain,&real_box,coord,is_per.dataPtr());

    for ( int n=0; n<BL_SPACEDIM; n++ ) {
        dx[n] = ( geom.ProbHi(n) - geom.ProbLo(n) )/domain.length(n);
    }

    if (ParallelDescriptor::IOProcessor()) {
        std::cout << "Grid resolution : " << n_cell << " (cells)" << std::endl;
        std::cout << "Domain size     : " << real_box.hi(0) - real_box.lo(0) << " (length unit) " << std::endl;
        std::cout << "Max_grid_size   : " << max_grid_size << " (cells)" << std::endl;
        std::cout << "Number of grids : " << bs.size() << std::endl;
    }

    // Allocate and define the right hand side.
    bool do_4th = (solver_type==BoxLib_C4 || solver_type==All);
    int ngr = (do_4th ? 1 : 0);
    MultiFab rhs(bs, Ncomp, ngr);
    setup_rhs(rhs, geom);

    // Set up the Helmholtz operator coefficients.
    MultiFab alpha(bs, Ncomp, 0);
    PArray<MultiFab> beta(BL_SPACEDIM, PArrayManage);
    for ( int n=0; n<BL_SPACEDIM; ++n ) {
        BoxArray bx(bs);
        beta.set(n, new MultiFab(bx.surroundingNodes(n), Ncomp, 0, Fab_allocate));
    }

    // The way HPGMG stores face-centered data is completely different than the
    // way BoxLib does it, and translating between the two directly via indexing
    // magic is a nightmare.  Happily, the way this tutorial calculates
    // face-centered values is by first calculating cell-centered values and then
    // interpolating to the cell faces.  HPGMG can do the same thing, so rather
    // than converting directly from BoxLib's face-centered data to HPGMG's, just
    // give HPGMG the cell-centered data and let it interpolate itself.
    MultiFab beta_cc(bs,Ncomp,1);  // cell-centered beta
    setup_coeffs(bs, alpha, beta, geom, beta_cc);

    MultiFab alpha4, beta4;
    if (do_4th) {
        alpha4.define(bs, Ncomp, 4, Fab_allocate);
        beta4.define(bs, Ncomp, 3, Fab_allocate);
        setup_coeffs4(bs, alpha4, beta4, geom);
    }

    MultiFab anaSoln;
    if (comp_norm || plot_err || plot_asol) {
        anaSoln.define(bs, Ncomp, 0, Fab_allocate);
        compute_analyticSolution(anaSoln,Array<Real>(BL_SPACEDIM,0.5));

        if (plot_asol) {
            writePlotFile("ASOL", anaSoln, geom);
        }
    }

    // Allocate the solution array
    // Set the number of ghost cells in the solution array.
    MultiFab soln(bs, Ncomp, 1);
    MultiFab soln4;
    if (do_4th) {
        soln4.define(bs, Ncomp, 3, Fab_allocate);
    }
    MultiFab gphi(bs, BL_SPACEDIM, 0);

#ifdef USEHYPRE
    if (solver_type == Hypre || solver_type == All) {
        if (ParallelDescriptor::IOProcessor()) {
            std::cout << "----------------------------------------" << std::endl;
            std::cout << "Solving with Hypre " << std::endl;
        }
        solve(soln, anaSoln, gphi, a, b, alpha, beta, beta_cc, rhs, bs, geom, Hypre);
    }
#endif

    if (solver_type == BoxLib_C || solver_type == All) {
        if (ParallelDescriptor::IOProcessor()) {
            std::cout << "----------------------------------------" << std::endl;
            std::cout << "Solving with BoxLib C++ solver " << std::endl;
        }
        solve(soln, anaSoln, gphi, a, b, alpha, beta, beta_cc, rhs, bs, geom, BoxLib_C);
    }

    if (solver_type == BoxLib_C4 || solver_type == All) {
        if (ParallelDescriptor::IOProcessor()) {
            std::cout << "----------------------------------------" << std::endl;
            std::cout << "Solving with BoxLib C++ 4th order solver " << std::endl;
        }
        solve4(soln4, anaSoln, a, b, alpha4, beta4, rhs, bs, geom);
    }

#ifdef USE_F90_SOLVERS
    if (solver_type == BoxLib_F || solver_type == All) {
        if (ParallelDescriptor::IOProcessor()) {
            std::cout << "----------------------------------------" << std::endl;
            std::cout << "Solving with BoxLib F90 solver " << std::endl;
        }
        solve(soln, anaSoln, gphi, a, b, alpha, beta, beta_cc, rhs, bs, geom, BoxLib_F);
    }
#endif

#ifdef USEHPGMG
    if (solver_type == HPGMG || solver_type == All) {
        if (ParallelDescriptor::IOProcessor()) {
            std::cout << "----------------------------------------" << std::endl;
            std::cout << "Solving with HPGMG solver " << std::endl;
        }
        solve(soln, anaSoln, gphi, a, b, alpha, beta, beta_cc, rhs, bs, geom, HPGMG);
    }
#endif

    if (ParallelDescriptor::IOProcessor()) {
        std::cout << "----------------------------------------" << std::endl;
    }

    BL_PROFILE_VAR_STOP(pmain);

    BoxLib::Finalize();
}
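Likewise, a plausible inputs file for this solver driver. The names match the ParmParse calls above (solver_type, bc_type, a, b, n_cell and max_grid_size use pp.get and are therefore required; the "mg." prefix comes from ParmParse ppmg("mg")); the values are illustrative only:

solver_type = BoxLib_C   # BoxLib_C, BoxLib_C4, BoxLib_F, Hypre, or All
bc_type = Periodic       # Dirichlet, Neumann, or Periodic
a = 0.0                  # scalar coefficients of the Helmholtz operator
b = 1.0
n_cell = 128
max_grid_size = 32
tol_rel = 1.e-10         # relative solver tolerance
comp_norm = 1            # compare against the analytic solution
mg.v = 2                 # multigrid verbosity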