Example #1
    void average_face_to_cellcenter (MultiFab& cc, const PArray<MultiFab>& fc, const Geometry& geom)
    {
	BL_ASSERT(cc.nComp() >= BL_SPACEDIM);
	BL_ASSERT(fc.size() == BL_SPACEDIM);
	BL_ASSERT(fc[0].nComp() == 1); // We only expect fc to have the gradient perpendicular to the face

	const Real* dx     = geom.CellSize();
	const Real* problo = geom.ProbLo();
	int coord_type = Geometry::Coord();

#ifdef _OPENMP
#pragma omp parallel
#endif
	for (MFIter mfi(cc,true); mfi.isValid(); ++mfi) 
	{
	    const Box& bx = mfi.tilebox();

	    BL_FORT_PROC_CALL(BL_AVG_FC_TO_CC,bl_avg_fc_to_cc)
		(bx.loVect(), bx.hiVect(),
		 BL_TO_FORTRAN(cc[mfi]),
		 D_DECL(BL_TO_FORTRAN(fc[0][mfi]),
			BL_TO_FORTRAN(fc[1][mfi]),
			BL_TO_FORTRAN(fc[2][mfi])),
		 dx, problo, coord_type);
	}
    }
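A minimal calling sketch (the names velcc and velfc are illustrative, and ba and geom are assumed to already exist): the cell-centered destination needs at least BL_SPACEDIM components, and each face-centered MultiFab must be nodal in its own direction, which surroundingNodes arranges.

    // Hypothetical setup; ba (BoxArray) and geom (Geometry) are assumed defined.
    MultiFab velcc(ba, BL_SPACEDIM, 0);               // one cell-centered component per direction
    PArray<MultiFab> velfc(BL_SPACEDIM, PArrayManage);
    for (int dir = 0; dir < BL_SPACEDIM; ++dir) {
        BoxArray edge_ba = ba;
        edge_ba.surroundingNodes(dir);                // nodal in direction dir
        velfc.set(dir, new MultiFab(edge_ba, 1, 0));  // one component, no ghost cells
    }
    // ... fill velfc with face-centered data ...
    average_face_to_cellcenter(velcc, velfc, geom);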
Example #2
IntVect
ParticleBase::Index (const ParticleBase& p,
                     const Geometry&     geom)
{
    IntVect iv;

    D_TERM(iv[0]=floor((p.m_pos[0]-geom.ProbLo(0))/geom.CellSize(0));,
           iv[1]=floor((p.m_pos[1]-geom.ProbLo(1))/geom.CellSize(1));,
           iv[2]=floor((p.m_pos[2]-geom.ProbLo(2))/geom.CellSize(2)););

    // Shift from an index relative to ProbLo into the domain's index space.
    iv += geom.Domain().smallEnd();

    return iv;
}
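To make the index arithmetic concrete with made-up 1-D numbers: if ProbLo(0) = -1.0 and CellSize(0) = 0.25, a particle at m_pos[0] = -0.1 yields floor((-0.1 - (-1.0))/0.25) = floor(3.6) = 3, so before the smallEnd shift the particle sits in cell 3 counted from the low edge of the domain.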
Example #3
void advance (MultiFab& old_phi, MultiFab& new_phi, PArray<MultiFab>& flux,
	      Real time, Real dt, const Geometry& geom, PhysBCFunct& physbcf,
	      BCRec& bcr)
{
  // Fill the ghost cells of each grid from the other grids
  // includes periodic domain boundaries
  old_phi.FillBoundary(geom.periodicity());

  // Fill physical boundaries
  physbcf.FillBoundary(old_phi, time);

  int Ncomp = old_phi.nComp();
  int ng_p = old_phi.nGrow();
  int ng_f = flux[0].nGrow();

  const Real* dx = geom.CellSize();

  //
  // Note that this simple example is not optimized:
  // the two MFIter loops below could be merged,
  // and the flux MultiFabs are not strictly necessary.
  //

  // Compute fluxes one grid at a time
  for ( MFIter mfi(old_phi); mfi.isValid(); ++mfi )
  {
    const Box& bx = mfi.validbox();

    compute_flux(old_phi[mfi].dataPtr(),
		 &ng_p,
		 flux[0][mfi].dataPtr(),
		 flux[1][mfi].dataPtr(),
#if (BL_SPACEDIM == 3)   
		 flux[2][mfi].dataPtr(),
#endif
		 &ng_f, bx.loVect(), bx.hiVect(), 
		 (geom.Domain()).loVect(),
		 (geom.Domain()).hiVect(),
		 bcr.vect(),
		 &dx[0]);
  }

  // Advance the solution one grid at a time
  for ( MFIter mfi(old_phi); mfi.isValid(); ++mfi )
  {
    const Box& bx = mfi.validbox();

    update_phi(old_phi[mfi].dataPtr(),
	       new_phi[mfi].dataPtr(),
	       &ng_p,
	       flux[0][mfi].dataPtr(),
	       flux[1][mfi].dataPtr(),
#if (BL_SPACEDIM == 3)   
	       flux[2][mfi].dataPtr(),
#endif
	       &ng_f, bx.loVect(), bx.hiVect(), &dx[0] , &dt);
  }
}
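The two FillBoundary calls at the top form a reusable pattern: grid-to-grid and periodic ghost cells are filled first, then the remaining physical-boundary ghost cells. A minimal sketch of applying the same pattern to some other MultiFab (the name state is illustrative):

    // Hypothetical: fill every ghost cell of "state" before applying a stencil to it.
    state.FillBoundary(geom.periodicity());  // interior and periodic ghost cells from neighbors
    physbcf.FillBoundary(state, time);       // remaining ghost cells on the physical boundary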
Example #4
    void average_cellcenter_to_face (PArray<MultiFab>& fc, const MultiFab& cc, const Geometry& geom)
    {
	BL_ASSERT(cc.nComp() == 1);
	BL_ASSERT(cc.nGrow() >= 1);
	BL_ASSERT(fc.size() == BL_SPACEDIM);
	BL_ASSERT(fc[0].nComp() == 1); // We only expect fc to have the gradient perpendicular to the face

	const Real* dx     = geom.CellSize();
	const Real* problo = geom.ProbLo();
	int coord_type = Geometry::Coord();

#ifdef _OPENMP
#pragma omp parallel
#endif
	for (MFIter mfi(cc,true); mfi.isValid(); ++mfi) 
	{
	    const Box& xbx = mfi.nodaltilebox(0);
#if (BL_SPACEDIM > 1)
	    const Box& ybx = mfi.nodaltilebox(1);
#endif
#if (BL_SPACEDIM == 3)
	    const Box& zbx = mfi.nodaltilebox(2);
#endif
	    
	    BL_FORT_PROC_CALL(BL_AVG_CC_TO_FC,bl_avg_cc_to_fc)
		(xbx.loVect(), xbx.hiVect(),
#if (BL_SPACEDIM > 1)
		 ybx.loVect(), ybx.hiVect(),
#endif
#if (BL_SPACEDIM == 3)
		 zbx.loVect(), zbx.hiVect(),
#endif
		 D_DECL(BL_TO_FORTRAN(fc[0][mfi]),
			BL_TO_FORTRAN(fc[1][mfi]),
			BL_TO_FORTRAN(fc[2][mfi])),
		 BL_TO_FORTRAN(cc[mfi]),
		 dx, problo, coord_type);
	}
    }
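Its call site mirrors Example #1 in reverse; a sketch with illustrative names (phicc, gradfc), again assuming ba and geom exist. Note the requirement enforced by the asserts: cc must carry at least one ghost cell.

    MultiFab phicc(ba, 1, 1);                          // one component, one ghost cell (required above)
    PArray<MultiFab> gradfc(BL_SPACEDIM, PArrayManage);
    for (int dir = 0; dir < BL_SPACEDIM; ++dir) {
        BoxArray edge_ba = ba;
        edge_ba.surroundingNodes(dir);
        gradfc.set(dir, new MultiFab(edge_ba, 1, 0));
    }
    // ... fill phicc, including its ghost cells ...
    average_cellcenter_to_face(gradfc, phicc, geom);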
Example #5
void MGRadBndry::setBndryConds(const BCRec& bc,
			     const Geometry& geom, IntVect& ratio)
{

//  NOTE: ALL BCLOC VALUES ARE NOW DEFINED AS A LENGTH IN PHYSICAL DIMENSIONS
//        *RELATIVE* TO THE FACE, NOT IN ABSOLUTE PHYSICAL SPACE
  const BoxArray& grids = boxes();
  int ngrds = grids.size();
  const Real* dx = geom.CellSize();
  const Box& domain = geom.Domain();

  for (OrientationIter fi; fi; ++fi) {
    Orientation face(fi());
    Array<Real> &bloc = bcloc[face];
    Array<RadBoundCond> &bctag = bcond[face];

    int dir = face.coordDir();
    Real delta = dx[dir]*ratio[dir];
    int p_bc = (face.isLow() ? bc.lo(dir) : bc.hi(dir));

    for (int i = 0; i < ngrds; i++) {
      const Box& grd = grids[i];

      if (domain[face] == grd[face] && !geom.isPeriodic(dir)) {
/*
	// All physical bc values are located on face
	if (p_bc == EXT_DIR) {
	  bctag[i] = LO_DIRICHLET;
	  bloc[i] = 0.;
	}
	else if (p_bc == EXTRAP || p_bc == HOEXTRAP || p_bc == REFLECT_EVEN) {
	  bctag[i] = LO_NEUMANN;
	  bloc[i] = 0.;
	}
	else if (p_bc == REFLECT_ODD) {
	  bctag[i] = LO_REFLECT_ODD;
	  bloc[i] = 0.;
	}
*/
	if (p_bc == LO_DIRICHLET   || p_bc == LO_NEUMANN ||
	    p_bc == LO_REFLECT_ODD) {
	  bctag[i] = p_bc;
	  bloc[i] = 0.;
	}
	else if (p_bc == LO_MARSHAK || p_bc == LO_SANCHEZ_POMRANING) {
	  bctag[i] = p_bc;
	  //gives asymmetric, second-order version of Marshak b.c.
          // (worked for bbmg, works with nonsymmetric hypre solvers):
	  bloc[i] = 0.;
	  //gives symmetric version of Marshak b.c.
          //(hypre symmetric solvers ignore bloc and do this automatically):
	  //bloc[i] = -0.5 * dx[dir];
	}
	else {
	  cerr << "MGRadBndry---Not a recognized boundary condition" << endl;
	  exit(1);
	}
      }
      else {
	// internal bndry
	bctag[i] = LO_DIRICHLET;
	bloc[i] = 0.5*delta;
      }
    }
  }
}
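The outer loop is a reusable idiom on its own: OrientationIter visits all 2*BL_SPACEDIM faces of the domain. A stripped-down sketch of that idiom, with nothing radiation-specific in it:

    for (OrientationIter fi; fi; ++fi) {
        Orientation face(fi());
        int  dir = face.coordDir();   // which coordinate direction, 0..BL_SPACEDIM-1
        bool low = face.isLow();      // low or high side of that direction
        // ... per-face work, e.g. choosing between bc.lo(dir) and bc.hi(dir) ...
    }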
Example #6
void
writePlotFile (const std::string& dir,
               const MultiFab&    mf,
               const Geometry&    geom)
{
    //
    // Only let 64 CPUs be writing at any one time.
    //
    VisMF::SetNOutFiles(64);
    //
    // Only the I/O processor makes the directory if it doesn't already exist.
    //
    if (ParallelDescriptor::IOProcessor())
        if (!BoxLib::UtilCreateDirectory(dir, 0755))
            BoxLib::CreateDirectoryFailed(dir);
    //
    // Force other processors to wait till directory is built.
    //
    ParallelDescriptor::Barrier();

    std::string HeaderFileName = dir + "/Header";

    VisMF::IO_Buffer io_buffer(VisMF::IO_Buffer_Size);

    std::ofstream HeaderFile;

    HeaderFile.rdbuf()->pubsetbuf(io_buffer.dataPtr(), io_buffer.size());

    if (ParallelDescriptor::IOProcessor())
    {
        //
        // Only the IOProcessor() writes to the header file.
        //
        HeaderFile.open(HeaderFileName.c_str(), std::ios::out|std::ios::trunc|std::ios::binary);
        if (!HeaderFile.good())
            BoxLib::FileOpenFailed(HeaderFileName);
        HeaderFile << "NavierStokes-V1.1\n";

        HeaderFile << mf.nComp() << '\n';

        for (int ivar = 1; ivar <= mf.nComp(); ivar++) {
          HeaderFile << "Variable " << ivar << "\n";
        }

        HeaderFile << BL_SPACEDIM << '\n';   // spatial dimension
        HeaderFile << 0 << '\n';             // time
        HeaderFile << 0 << '\n';             // finest level
        for (int i = 0; i < BL_SPACEDIM; i++)
            HeaderFile << geom.ProbLo(i) << ' ';
        HeaderFile << '\n';
        for (int i = 0; i < BL_SPACEDIM; i++)
            HeaderFile << geom.ProbHi(i) << ' ';
        HeaderFile << '\n';
        HeaderFile << '\n';
        HeaderFile << geom.Domain() << ' ';
        HeaderFile << '\n';
        HeaderFile << 0 << ' ';
        HeaderFile << '\n';
        for (int k = 0; k < BL_SPACEDIM; k++)
            HeaderFile << geom.CellSize()[k] << ' ';
        HeaderFile << '\n';
        HeaderFile << geom.Coord() << '\n';
        HeaderFile << "0\n";
    }
    //
    // Build the directory to hold the MultiFab at this level.
    // The name is relative to the directory containing the Header file.
    //
    static const std::string BaseName = "/Cell";

    std::string Level = BoxLib::Concatenate("Level_", 0, 1);
    //
    // Now for the full pathname of that directory.
    //
    std::string FullPath = dir;
    if (!FullPath.empty() && FullPath[FullPath.length()-1] != '/')
        FullPath += '/';
    FullPath += Level;
    //
    // Only the I/O processor makes the directory if it doesn't already exist.
    //
    if (ParallelDescriptor::IOProcessor())
        if (!BoxLib::UtilCreateDirectory(FullPath, 0755))
            BoxLib::CreateDirectoryFailed(FullPath);
    //
    // Force other processors to wait till directory is built.
    //
    ParallelDescriptor::Barrier();

    if (ParallelDescriptor::IOProcessor())
    {
        HeaderFile << 0 << ' ' << mf.boxArray().size() << ' ' << 0 << '\n';
        HeaderFile << 0 << '\n';

        for (int i = 0; i < mf.boxArray().size(); ++i)
        {
            RealBox loc = RealBox(mf.boxArray()[i],geom.CellSize(),geom.ProbLo());
            for (int n = 0; n < BL_SPACEDIM; n++)
                HeaderFile << loc.lo(n) << ' ' << loc.hi(n) << '\n';
        }

        std::string PathNameInHeader = Level;
        PathNameInHeader += BaseName;
        HeaderFile << PathNameInHeader << '\n';
    }
    //
    // Use the Full pathname when naming the MultiFab.
    //
    std::string TheFullPath = FullPath;
    TheFullPath += BaseName;

    VisMF::Write(mf,TheFullPath);
}
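Usage is a single collective call; every rank must enter it, since the barriers and VisMF::Write are collective operations. A sketch, assuming mf and geom are already defined:

    // Writes <pltfile>/Header plus the Level_0/Cell data files.
    const std::string pltfile = BoxLib::Concatenate("plt", 0, 5);  // "plt00000"
    writePlotFile(pltfile, mf, geom);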
Example #7
void main_main ()
{
  // What time is it now?  We'll use this to compute total run time.
  Real strt_time = ParallelDescriptor::second();

  std::cout << std::setprecision(15);

  int n_cell, max_grid_size, nsteps, plot_int;

  // Boundary conditions
  Array<int> lo_bc(BL_SPACEDIM), hi_bc(BL_SPACEDIM);

  // input parameters
  {
    // ParmParse is a way of reading inputs from the inputs file
    ParmParse pp;

    // We need to get n_cell from the inputs file - this is the number of cells on each side of 
    //   a square (or cubic) domain.
    pp.get("n_cell",n_cell);

    // We also need max_grid_size from the inputs file - this is the largest
    //   allowed length of a grid along any direction
    pp.get("max_grid_size",max_grid_size);

    // Default plot_int to 1, allowing us to override it in the inputs file
    //  If plot_int < 0 then no plot files will be written
    plot_int = 1;
    pp.query("plot_int",plot_int);

    // Default nsteps to 0, allowing us to override it in the inputs file
    nsteps = 0;
    pp.query("nsteps",nsteps);

    // Boundary conditions - default is periodic (INT_DIR)
    for (int i = 0; i < BL_SPACEDIM; ++i)
    {
      lo_bc[i] = hi_bc[i] = INT_DIR;   // periodic boundaries are interior boundaries
    }
    pp.queryarr("lo_bc",lo_bc,0,BL_SPACEDIM);
    pp.queryarr("hi_bc",hi_bc,0,BL_SPACEDIM);
  }
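  // For reference, an inputs file feeding the ParmParse calls above might
  // look like this (values are illustrative):
  //
  //   n_cell = 64
  //   max_grid_size = 32
  //   nsteps = 100
  //   plot_int = 10
  //   lo_bc = 0 0 0
  //   hi_bc = 0 0 0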

  // make BoxArray and Geometry
  BoxArray ba;
  Geometry geom;
  {
    IntVect dom_lo(IntVect(D_DECL(0,0,0)));
    IntVect dom_hi(IntVect(D_DECL(n_cell-1, n_cell-1, n_cell-1)));
    Box domain(dom_lo, dom_hi);

    // Initialize the boxarray "ba" from the single box "domain"
    ba.define(domain);
    // Break up boxarray "ba" into chunks no larger than "max_grid_size" along a direction
    ba.maxSize(max_grid_size);

    // This defines the physical size of the box.  Right now the box is [-1,1] in each direction.
    RealBox real_box;
    for (int n = 0; n < BL_SPACEDIM; n++) {
      real_box.setLo(n,-1.0);
      real_box.setHi(n, 1.0);
    }

    // This says we are using Cartesian coordinates
    int coord = 0;
	
    // A direction is periodic only when both of its boundary conditions are interior (INT_DIR)
    int is_periodic[BL_SPACEDIM];
    for (int i = 0; i < BL_SPACEDIM; i++)
    {
      is_periodic[i] = 0;
      if (lo_bc[i] == 0 && hi_bc[i] == 0) {
	is_periodic[i] = 1;
      }
    }

    // This defines a Geometry object
    geom.define(domain,&real_box,coord,is_periodic);
  }

  // Boundary conditions
  PhysBCFunct physbcf;
  BCRec bcr(&lo_bc[0], &hi_bc[0]);
  physbcf.define(geom, bcr, BndryFunctBase(phifill)); // phifill is a Fortran routine

  // define dx[]
  const Real* dx = geom.CellSize();

  // Nghost = number of ghost cells for each array 
  int Nghost = 1;

  // Ncomp = number of components for each array
  int Ncomp  = 1;

  // time = starting time in the simulation
  Real time = 0.0;
  
  // we allocate two phi multifabs; one will store the old state, the other the new
  // we swap the indices each time step to avoid copies of new into old
  PArray<MultiFab> phi(2, PArrayManage);
  phi.set(0, new MultiFab(ba, Ncomp, Nghost));
  phi.set(1, new MultiFab(ba, Ncomp, Nghost));

  // Initialize both to zero (just because)
  phi[0].setVal(0.0);
  phi[1].setVal(0.0);

  // Initialize phi[init_index] by calling a Fortran routine.
  // MFIter = MultiFab Iterator
  int init_index = 0;
  for ( MFIter mfi(phi[init_index]); mfi.isValid(); ++mfi )
  {
    const Box& bx = mfi.validbox();

    init_phi(phi[init_index][mfi].dataPtr(),
	     bx.loVect(), bx.hiVect(), &Nghost,
	     geom.CellSize(), geom.ProbLo(), geom.ProbHi());
  }

  // compute the time step
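  // 0.9 is a safety factor under the explicit diffusion stability limit dt <= dx^2/(2*BL_SPACEDIM)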
  Real dt = 0.9*dx[0]*dx[0] / (2.0*BL_SPACEDIM);

  // Write a plotfile of the initial data if plot_int > 0 (plot_int was defined in the inputs file)
  if (plot_int > 0)
  {
    int n = 0;
    const std::string& pltfile = BoxLib::Concatenate("plt",n,5);
    writePlotFile(pltfile, phi[init_index], geom, time);
  }

  // build the flux multifabs
  PArray<MultiFab> flux(BL_SPACEDIM, PArrayManage);
  for (int dir = 0; dir < BL_SPACEDIM; dir++)
  {
    // flux(dir) has one component, zero ghost cells, and is nodal in direction dir
    BoxArray edge_ba = ba;
    edge_ba.surroundingNodes(dir);
    flux.set(dir, new MultiFab(edge_ba, 1, 0));
  }

  int old_index = init_index;
  for (int n = 1; n <= nsteps; n++, old_index = 1 - old_index)
  {
    int new_index = 1 - old_index;

    // new_phi = old_phi + dt * (something)
    advance(phi[old_index], phi[new_index], flux, time, dt, geom, physbcf, bcr); 
    time = time + dt;

    // Tell the I/O Processor to write out which step we're doing
    if (ParallelDescriptor::IOProcessor())
      std::cout << "Advanced step " << n << std::endl;

    // Write a plotfile of the current data (plot_int was defined in the inputs file)
    if (plot_int > 0 && n%plot_int == 0)
    {
      const std::string& pltfile = BoxLib::Concatenate("plt",n,5);
      writePlotFile(pltfile, phi[new_index], geom, time);
    }
  }

  // Call the timer again and compute the maximum difference between the start time and stop time
  //   over all processors
  Real stop_time = ParallelDescriptor::second() - strt_time;
  const int IOProc = ParallelDescriptor::IOProcessorNumber();
  ParallelDescriptor::ReduceRealMax(stop_time,IOProc);

  // Tell the I/O Processor to write out the "run time"
  if (ParallelDescriptor::IOProcessor()) {
    std::cout << "Run time = " << stop_time << std::endl;
  }
}
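main_main would normally be driven by a thin main that brackets it with BoxLib setup and teardown; a minimal sketch:

    int main (int argc, char* argv[])
    {
        BoxLib::Initialize(argc, argv);   // MPI, ParmParse, etc.

        main_main();

        BoxLib::Finalize();
        return 0;
    }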