Example #1
void
FMultiGrid::Boundary::set_bndry_values (MacBndry& bndry, IntVect crse_ratio)
{
    // The values of phys_bc & ref_ratio do not matter
    // because we are not going to use those parts of MacBndry.
    IntVect ref_ratio = IntVect::TheZeroVector();
    Array<int> lo_bc(BL_SPACEDIM, 0);
    Array<int> hi_bc(BL_SPACEDIM, 0);
    BCRec phys_bc(lo_bc.dataPtr(), hi_bc.dataPtr());

    if (crse_phi == 0 && phi == 0) 
    {
	bndry.setHomogValues(phys_bc, ref_ratio);
    }
    else if (crse_phi == 0 && phi != 0)
    {
	bndry.setBndryValues(*phi, 0, 0, phi->nComp(), phys_bc); 
    }
    else if (crse_phi != 0 && phi != 0) 
    {
	BL_ASSERT(crse_ratio != IntVect::TheZeroVector());

	const int ncomp      = phi->nComp();
	const int in_rad     = 0;
	const int out_rad    = 1;
	const int extent_rad = 2;
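	// (BndryRegister radii: in_rad/out_rad control how far each boundary
	// FAB extends inside/outside a grid face; extent_rad is the extra
	// extension in the transverse directions.)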

	BoxArray crse_boxes(phi->boxArray());
	crse_boxes.coarsen(crse_ratio);

	BndryRegister crse_br(crse_boxes, in_rad, out_rad, extent_rad, ncomp);
	crse_br.copyFrom(*crse_phi, crse_phi->nGrow(), 0, 0, ncomp);
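	// crse_br now holds the coarse-level phi around each coarsened fine
	// grid, ready to be interpolated (by crse_ratio) to the fine boundary.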

	bndry.setBndryValues(crse_br, 0, *phi, 0, 0, ncomp, crse_ratio, phys_bc);
    }
    else
    {
	BoxLib::Abort("FMultiGrid::Boundary::set_bndry_values: How did we get here?");
    }
}
Example #2
void main_main ()
{
  // What time is it now?  We'll use this to compute total run time.
  Real strt_time = ParallelDescriptor::second();

  std::cout << std::setprecision(15);

  int n_cell, max_grid_size, nsteps, plot_int;

  // Boundary conditions
  Array<int> lo_bc(BL_SPACEDIM), hi_bc(BL_SPACEDIM);

  // Input parameters
  {
    // ParmParse is a way of reading inputs from the inputs file
    ParmParse pp;

    // We need to get n_cell from the inputs file - this is the number of cells on each side of 
    //   a square (or cubic) domain.
    pp.get("n_cell",n_cell);

    // We need max_grid_size from the inputs file - this is the largest
    //   size a grid can be along any direction
    pp.get("max_grid_size",max_grid_size);

    // Default plot_int to 1, allowing us to override it in the inputs file.
    //  If plot_int < 0 then no plot files will be written
    plot_int = 1;
    pp.query("plot_int",plot_int);

    // Default nsteps to 0, allowing us to override it in the inputs file
    nsteps = 0;
    pp.query("nsteps",nsteps);

    // Boundary conditions - default is periodic (INT_DIR)
    for (int i = 0; i < BL_SPACEDIM; ++i)
    {
      lo_bc[i] = hi_bc[i] = INT_DIR;   // periodic boundaries are interior boundaries
    }
    pp.queryarr("lo_bc",lo_bc,0,BL_SPACEDIM);
    pp.queryarr("hi_bc",hi_bc,0,BL_SPACEDIM);
  }

  // make BoxArray and Geometry
  BoxArray ba;
  Geometry geom;
  {
    IntVect dom_lo(IntVect(D_DECL(0,0,0)));
    IntVect dom_hi(IntVect(D_DECL(n_cell-1, n_cell-1, n_cell-1)));
    Box domain(dom_lo, dom_hi);

    // Initialize the boxarray "ba" from the single box "domain"
    ba.define(domain);
    // Break up boxarray "ba" into chunks no larger than "max_grid_size" along a direction
    ba.maxSize(max_grid_size);

    // This defines the physical size of the box.  Right now the box is [-1,1] in each direction.
    RealBox real_box;
    for (int n = 0; n < BL_SPACEDIM; n++) {
      real_box.setLo(n,-1.0);
      real_box.setHi(n, 1.0);
    }

    // This says we are using Cartesian coordinates
    int coord = 0;
	
    // Mark a direction as periodic only if both of its
    // boundary conditions are interior (INT_DIR)
    int is_periodic[BL_SPACEDIM];
    for (int i = 0; i < BL_SPACEDIM; i++)
    {
      is_periodic[i] = (lo_bc[i] == INT_DIR && hi_bc[i] == INT_DIR) ? 1 : 0;
    }

    // This defines a Geometry object
    geom.define(domain,&real_box,coord,is_periodic);
  }

  // Boundary conditions
  PhysBCFunct physbcf;
  BCRec bcr(&lo_bc[0], &hi_bc[0]);
  physbcf.define(geom, bcr, BndryFunctBase(phifill)); // phifill is a Fortran function

  // define dx[]
  const Real* dx = geom.CellSize();

  // Nghost = number of ghost cells for each array 
  int Nghost = 1;

  // Ncomp = number of components for each array
  int Ncomp  = 1;

  // time = starting time in the simulation
  Real time = 0.0;
  
  // we allocate two phi multifabs; one will store the old state, the other the new
  // we swap the indices each time step to avoid copies of new into old
  PArray<MultiFab> phi(2, PArrayManage);
  phi.set(0, new MultiFab(ba, Ncomp, Nghost));
  phi.set(1, new MultiFab(ba, Ncomp, Nghost));

  // Initialize both to zero (just because)
  phi[0].setVal(0.0);
  phi[1].setVal(0.0);

  // Initialize phi[init_index] by calling a Fortran routine.
  // MFIter = MultiFab Iterator
  int init_index = 0;
  for ( MFIter mfi(phi[init_index]); mfi.isValid(); ++mfi )
  {
    const Box& bx = mfi.validbox();

    init_phi(phi[init_index][mfi].dataPtr(),
	     bx.loVect(), bx.hiVect(), &Nghost,
	     geom.CellSize(), geom.ProbLo(), geom.ProbHi());
  }

  // compute the time step
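  //   (explicit diffusion with unit coefficient is stable for
  //    dt <= dx*dx/(2*BL_SPACEDIM); the 0.9 is a safety factor)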
  Real dt = 0.9*dx[0]*dx[0] / (2.0*BL_SPACEDIM);

  // Write a plotfile of the initial data if plot_int > 0 (plot_int was defined in the inputs file)
  if (plot_int > 0)
  {
    int n = 0;
    const std::string& pltfile = BoxLib::Concatenate("plt",n,5);
    writePlotFile(pltfile, phi[init_index], geom, time);
  }

  // build the flux multifabs
  PArray<MultiFab> flux(BL_SPACEDIM, PArrayManage);
  for (int dir = 0; dir < BL_SPACEDIM; dir++)
  {
    // flux(dir) has one component, zero ghost cells, and is nodal in direction dir
    BoxArray edge_ba = ba;
    edge_ba.surroundingNodes(dir);
    flux.set(dir, new MultiFab(edge_ba, 1, 0));
  }

  int old_index = init_index;
  for (int n = 1; n <= nsteps; n++, old_index = 1 - old_index)
  {
    int new_index = 1 - old_index;

    // new_phi = old_phi + dt * (something)
    advance(phi[old_index], phi[new_index], flux, time, dt, geom, physbcf, bcr); 
    time = time + dt;

    // Tell the I/O Processor to write out which step we're doing
    if (ParallelDescriptor::IOProcessor())
      std::cout << "Advanced step " << n << std::endl;

    // Write a plotfile of the current data (plot_int was defined in the inputs file)
    if (plot_int > 0 && n%plot_int == 0)
    {
      const std::string& pltfile = BoxLib::Concatenate("plt",n,5);
      writePlotFile(pltfile, phi[new_index], geom, time);
    }
  }

  // Call the timer again and compute the maximum difference between the start time and stop time
  //   over all processors
  Real stop_time = ParallelDescriptor::second() - strt_time;
  const int IOProc = ParallelDescriptor::IOProcessorNumber();
  ParallelDescriptor::ReduceRealMax(stop_time,IOProc);

  // Tell the I/O Processor to write out the "run time"
  if (ParallelDescriptor::IOProcessor()) {
    std::cout << "Run time = " << stop_time << std::endl;
  }
}
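
For reference, a minimal inputs file this example could read (the parameter names come from the ParmParse calls above; the values are only illustrative):

n_cell = 64
max_grid_size = 32
nsteps = 100
plot_int = 10
# lo_bc / hi_bc default to INT_DIR (periodic) when omitted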
Example #3
File: testVI.cpp  Project: dwillcox/BoxLib
int
main (int   argc,
      char* argv[])
{
    BoxLib::Initialize(argc,argv);

    std::cout << std::setprecision(10);

    if (argc < 2)
    {
      std::cerr << "usage:  " << argv[0] << " inputsfile [options]" << '\n';
      exit(-1);
    }

    ParmParse pp;
    
    int n;

    BoxArray bs;
    
#if BL_SPACEDIM == 2
    Box domain(IntVect(0,0),IntVect(11,11));
    std::string boxfile("gr.2_small_a");
#elif BL_SPACEDIM == 3
    Box domain(IntVect(0,0,0),IntVect(11,11,11));
    std::string boxfile("grids/gr.3_2x3x4");
#endif
    pp.query("boxes", boxfile);

    std::ifstream ifs(boxfile.c_str(), std::ios::in);

    if (!ifs)
    {
        std::string msg = "problem opening grids file: ";
        msg += boxfile.c_str();
        BoxLib::Abort(msg.c_str());
    }

    ifs >> domain;

    if (ParallelDescriptor::IOProcessor())
	std::cout << "domain: " << domain << std::endl;

    bs.readFrom(ifs);

    if (ParallelDescriptor::IOProcessor())
	std::cout << "grids:\n" << bs << std::endl;

    Geometry geom(domain);
    const Real* H = geom.CellSize();
    int ratio=2; pp.query("ratio", ratio);

    // allocate/init soln and rhs
    int Ncomp=BL_SPACEDIM;
    int Nghost=0;
    int Ngrids=bs.size();
    MultiFab soln(bs, Ncomp, Nghost, Fab_allocate); soln.setVal(0.0);
    MultiFab out(bs, Ncomp, Nghost, Fab_allocate); 
    MultiFab rhs(bs, Ncomp, Nghost, Fab_allocate); rhs.setVal(0.0);
    for(MFIter rhsmfi(rhs); rhsmfi.isValid(); ++rhsmfi)
    {
	FORT_FILLRHS(rhs[rhsmfi].dataPtr(),
		     ARLIM(rhs[rhsmfi].loVect()),ARLIM(rhs[rhsmfi].hiVect()),
		     H,&Ncomp);
    }
    
    // Create the boundary object
    MCViscBndry vbd(bs,geom);

    BCRec phys_bc;
    Array<int> lo_bc(BL_SPACEDIM), hi_bc(BL_SPACEDIM);
    pp.getarr("lo_bc",lo_bc,0,BL_SPACEDIM);
    pp.getarr("hi_bc",hi_bc,0,BL_SPACEDIM);
    for (int i = 0; i < BL_SPACEDIM; i++)
    {
        phys_bc.setLo(i,lo_bc[i]);
        phys_bc.setHi(i,hi_bc[i]);
    }

    
    // Create the BCRec's interpreted by ViscBndry objects
#if BL_SPACEDIM==2
    Array<BCRec> pbcarray(4);
    pbcarray[0] = BCRec(D_DECL(REFLECT_ODD,REFLECT_EVEN,EXT_DIR),
			D_DECL(EXT_DIR,EXT_DIR,EXT_DIR));
    pbcarray[1] = BCRec(D_DECL(REFLECT_EVEN,REFLECT_ODD,EXT_DIR),
			D_DECL(EXT_DIR,EXT_DIR,EXT_DIR));
    pbcarray[2] = BCRec(D_DECL(EXT_DIR,EXT_DIR,EXT_DIR),
			D_DECL(EXT_DIR,EXT_DIR,EXT_DIR));
    pbcarray[3] = BCRec(D_DECL(EXT_DIR,EXT_DIR,EXT_DIR),
			D_DECL(EXT_DIR,EXT_DIR,EXT_DIR));
#elif BL_SPACEDIM==3
    Array<BCRec> pbcarray(12);

#if 1
    pbcarray[0] = BCRec(EXT_DIR,EXT_DIR,EXT_DIR,EXT_DIR,EXT_DIR,EXT_DIR);
    pbcarray[1] = BCRec(EXT_DIR,EXT_DIR,EXT_DIR,EXT_DIR,EXT_DIR,EXT_DIR);
    pbcarray[2] = BCRec(EXT_DIR,EXT_DIR,EXT_DIR,EXT_DIR,EXT_DIR,EXT_DIR);
    pbcarray[3] = BCRec(D_DECL(EXT_DIR,EXT_DIR,EXT_DIR),
			D_DECL(EXT_DIR,EXT_DIR,EXT_DIR));
    pbcarray[4] = BCRec(D_DECL(EXT_DIR,EXT_DIR,EXT_DIR),
			D_DECL(EXT_DIR,EXT_DIR,EXT_DIR));
    pbcarray[5] = BCRec(D_DECL(EXT_DIR,EXT_DIR,EXT_DIR),
			D_DECL(EXT_DIR,EXT_DIR,EXT_DIR));
    pbcarray[6] = BCRec(D_DECL(EXT_DIR,EXT_DIR,EXT_DIR),
			D_DECL(EXT_DIR,EXT_DIR,EXT_DIR));
    pbcarray[7] = BCRec(D_DECL(EXT_DIR,EXT_DIR,EXT_DIR),
			D_DECL(EXT_DIR,EXT_DIR,EXT_DIR));
    pbcarray[8] = BCRec(D_DECL(EXT_DIR,EXT_DIR,EXT_DIR),
			D_DECL(EXT_DIR,EXT_DIR,EXT_DIR));
    pbcarray[9] = BCRec(D_DECL(EXT_DIR,EXT_DIR,EXT_DIR),
			D_DECL(EXT_DIR,EXT_DIR,EXT_DIR));
    pbcarray[10] = BCRec(D_DECL(EXT_DIR,EXT_DIR,EXT_DIR),
			 D_DECL(EXT_DIR,EXT_DIR,EXT_DIR));
    pbcarray[11] = BCRec(D_DECL(EXT_DIR,EXT_DIR,EXT_DIR),
			 D_DECL(EXT_DIR,EXT_DIR,EXT_DIR));
#else
    for (int i = 0; i < 12; i++)
        pbcarray[i] = phys_bc;
#endif
#endif
    
    Nghost = 1; // need space for bc info
    MultiFab fine(bs,Ncomp,Nghost,Fab_allocate);
    for(MFIter finemfi(fine); finemfi.isValid(); ++finemfi)
    {
	FORT_FILLFINE(fine[finemfi].dataPtr(),
		      ARLIM(fine[finemfi].loVect()),ARLIM(fine[finemfi].hiVect()),
		      H,&Ncomp);
    }

    // Create "background coarse data"
    Box crse_bx = Box(domain).coarsen(ratio).grow(1);
    BoxArray cba(crse_bx);
    cba.maxSize(32);
    Real h_crse[BL_SPACEDIM];
    for (n=0; n<BL_SPACEDIM; n++) h_crse[n] = H[n]*ratio;

    MultiFab crse_mf(cba, Ncomp, 0);
//    FArrayBox crse_fab(crse_bx,Ncomp);

    for (MFIter mfi(crse_mf); mfi.isValid(); ++mfi)
    {
        FORT_FILLCRSE(crse_mf[mfi].dataPtr(),
                      ARLIM(crse_mf[mfi].loVect()),ARLIM(crse_mf[mfi].hiVect()),
                      h_crse,&Ncomp);
    }


    
    // Create coarse boundary register, fill w/data from coarse FAB
    int bndry_InRad=0;
    int bndry_OutRad=1;
    int bndry_Extent=1;
    BoxArray cbs = BoxArray(bs).coarsen(ratio);
    BndryRegister cbr(cbs,bndry_InRad,bndry_OutRad,bndry_Extent,Ncomp);
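    // (in_rad=0/out_rad=1 puts the register data in the first cell outside
    // each coarse-grid face; extent_rad=1 extends it one cell transversely.)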
    for (OrientationIter face; face; ++face)
    {
	Orientation f = face();
	FabSet& bnd_fs(cbr[f]);
	bnd_fs.copyFrom(crse_mf, 0, 0, 0, Ncomp);
    }
  
    // Interpolate crse data to fine boundary, where applicable
    int cbr_Nstart=0;
    int fine_Nstart=0;
    int bndry_Nstart=0;
    vbd.setBndryValues(cbr,cbr_Nstart,fine,fine_Nstart,
		       bndry_Nstart,Ncomp,ratio,pbcarray);
  
    Nghost = 1; // other variables don't need extra space
    
    DivVis lp(vbd,H);
    
    Real a = 0.0;
    Real b[BL_SPACEDIM];
    b[0] = 1.0;
    b[1] = 1.0;
#if BL_SPACEDIM>2
    b[2] = 1.0;
#endif
    MultiFab  acoefs;
    int NcompA = (BL_SPACEDIM == 2  ?  2  :  1);
    acoefs.define(bs, NcompA, Nghost, Fab_allocate);
    acoefs.setVal(a);
    MultiFab bcoefs[BL_SPACEDIM];
    for (n=0; n<BL_SPACEDIM; ++n)
    {
	BoxArray bsC(bs);
	bcoefs[n].define(bsC.surroundingNodes(n), 1,
			 Nghost, Fab_allocate);
#if 1
	for(MFIter bmfi(bcoefs[n]); bmfi.isValid(); ++bmfi)
	{
	    FORT_MAKEMU(bcoefs[n][bmfi].dataPtr(),
			ARLIM(bcoefs[n][bmfi].loVect()),ARLIM(bcoefs[n][bmfi].hiVect()),H,n);
	}
#else
	bcoefs[n].setVal(b[n]);
#endif
    } // -->> over dimension
    lp.setCoefficients(acoefs, bcoefs);
#if 1
    lp.maxOrder(4);
#endif
    
    Nghost = 1;
    MultiFab tsoln(bs, Ncomp, Nghost, Fab_allocate); 
    tsoln.setVal(0.0);
#if 1
    tsoln.copy(fine);
#endif
#if 0
    // testing apply
    lp.apply(out,tsoln);
    Box subbox = out[0].box();
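    // pow(H[0],BL_SPACEDIM) weights the 1-norm sum by the cell volume,
    // making n1 a discrete L1 norm.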
    Real n1 = out[0].norm(subbox,1,0,BL_SPACEDIM)*pow(H[0],BL_SPACEDIM);
    ParallelDescriptor::ReduceRealSum(n1);
    if (ParallelDescriptor::IOProcessor())
    {
	cout << "n1 output is "<<n1<<std::endl;
    }
    out.minus(rhs,0,BL_SPACEDIM,0);
    // special to single grid prob
    Real n2 = out[0].norm(subbox,1,0,BL_SPACEDIM)*pow(H[0],BL_SPACEDIM);
    ParallelDescriptor::ReduceRealSum(n2);
    if (ParallelDescriptor::IOProcessor())
    {
	cout << "n2 difference is "<<n2<<std::endl;
    }
#if 0
    subbox.grow(-1);
    Real n3 = out[0].norm(subbox,0,0,BL_SPACEDIM)*pow(H[0],BL_SPACEDIM);
    ParallelDescriptor::ReduceRealMax(n3);
    if (ParallelDescriptor::IOProcessor())
    {
	cout << "n3 difference is "<<n3<<std::endl;
    }
#endif
    
#endif
    
    const IntVect refRatio(D_DECL(2,2,2));
    const Real bgVal = 1.0;
    
#if 1
#ifndef NDEBUG
    // testing flux computation
    BoxArray xfluxbox(bs);
    xfluxbox.surroundingNodes(0);
    MultiFab xflux(xfluxbox,Ncomp,Nghost,Fab_allocate);
    xflux.setVal(1.e30);
    BoxArray yfluxbox(bs);
    yfluxbox.surroundingNodes(1);
    MultiFab yflux(yfluxbox,Ncomp,Nghost,Fab_allocate);
    yflux.setVal(1.e30);
#if BL_SPACEDIM>2
    BoxArray zfluxbox(bs);
    zfluxbox.surroundingNodes(2);
    MultiFab zflux(zfluxbox,Ncomp,Nghost,Fab_allocate);
    zflux.setVal(1.e30);
#endif
    lp.compFlux(xflux,
		yflux,
#if BL_SPACEDIM>2
		zflux,
#endif
		tsoln);
    
    // Write fluxes
    //writeMF(&xflux,"xflux.mfab");
    //writeMF(&yflux,"yflux.mfab");
#if BL_SPACEDIM>2
    //writeMF(&zflux,"zflux.mfab");
#endif
    
#endif
#endif
    
    Real tolerance = 1.0e-10; pp.query("tol", tolerance);
    Real tolerance_abs = 1.0e-10; pp.query("tol_abs", tolerance_abs);

#if 0
    cout << "Bndry Data object:" << std::endl;
    cout << lp.bndryData() << std::endl;
#endif
    
#if 0
    bool use_mg_pre = false;
    MCCGSolver cg(lp,use_mg_pre);
    cg.solve(soln,rhs,tolerance,tolerance_abs);
#else
    MCMultiGrid mg(lp);
    mg.solve(soln,rhs,tolerance,tolerance_abs);
#endif

#if 0
    cout << "MCLinOp object:" << std::endl;
    cout << lp << std::endl;
#endif
    
    VisMF::Write(soln,"soln");
    
#if 0
    // apply operator to soln to see if really satisfies eqn
    tsoln.copy(soln);
    lp.apply(out,tsoln);
    soln.copy(out);
    // Output "apply" results on soln
    VisMF::Write(soln,"apply");

    // Compute truncation
    for (MFIter smfi(soln); smfi.isValid(); ++smfi)
    {
	soln[smfi] -= fine[smfi];
    }
    for( int icomp=0; icomp < BL_SPACEDIM ; icomp++ )
    {
	Real solnMin = soln.min(icomp);
	Real solnMax = soln.max(icomp);
	ParallelDescriptor::ReduceRealMin(solnMin);
	ParallelDescriptor::ReduceRealMax(solnMax);
	if (ParallelDescriptor::IOProcessor())
	{
	    std::cout << icomp << "  " << solnMin << " " << solnMax << std::endl;
	}
    }
    // Output truncation
    VisMF::Write(soln,"trunc");
#endif

    int dumpLp=0; pp.query("dumpLp",dumpLp);
    bool write_lp = (dumpLp == 1);
    if (write_lp)
	std::cout << lp << std::endl;

    // Shut down the parallel environment
    ParallelDescriptor::EndParallel();
}
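
Likewise, a sketch of an inputs file for this test (parameter names from the ParmParse calls above; lo_bc and hi_bc are required since they are read with getarr, the rest have defaults; values are illustrative):

boxes = grids/gr.3_2x3x4
ratio = 2
tol = 1.e-10
tol_abs = 1.e-10
lo_bc = 3 3 3   # BC_TYPES integers, one per dimension (3 = EXT_DIR)
hi_bc = 3 3 3
dumpLp = 0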