Example #1
    void average_face_to_cellcenter (MultiFab& cc, const PArray<MultiFab>& fc, const Geometry& geom)
    {
	BL_ASSERT(cc.nComp() >= BL_SPACEDIM);
	BL_ASSERT(fc.size() == BL_SPACEDIM);
	BL_ASSERT(fc[0].nComp() == 1); // We only expect fc to have the gradient perpendicular to the face

	const Real* dx     = geom.CellSize();
	const Real* problo = geom.ProbLo();
	int coord_type = Geometry::Coord();

#ifdef _OPENMP
#pragma omp parallel
#endif
	for (MFIter mfi(cc,true); mfi.isValid(); ++mfi) 
	{
	    const Box& bx = mfi.tilebox();

	    BL_FORT_PROC_CALL(BL_AVG_FC_TO_CC,bl_avg_fc_to_cc)
		(bx.loVect(), bx.hiVect(),
		 BL_TO_FORTRAN(cc[mfi]),
		 D_DECL(BL_TO_FORTRAN(fc[0][mfi]),
			BL_TO_FORTRAN(fc[1][mfi]),
			BL_TO_FORTRAN(fc[2][mfi])),
		 dx, problo, coord_type);
	}
    }
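A minimal usage sketch for this overload (not part of the original source; grids and geom are assumed to be an existing cell-centered BoxArray and its Geometry, and the free function is assumed to live in the BoxLib namespace):

    MultiFab cc(grids, BL_SPACEDIM, 0);            // one destination component per direction
    PArray<MultiFab> fc(BL_SPACEDIM, PArrayManage);
    for (int i = 0; i < BL_SPACEDIM; ++i) {
        BoxArray fba = grids;
        fba.surroundingNodes(i);                   // face-centered index type in direction i
        fc.set(i, new MultiFab(fba, 1, 0));        // single component, no ghost cells
    }
    // ... fill each fc[i] with face-centered data ...
    BoxLib::average_face_to_cellcenter(cc, fc, geom);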
Example #2
    void average_face_to_cellcenter (MultiFab& cc, int dcomp, const std::vector<MultiFab*>& fc)
    {
	BL_ASSERT(cc.nComp() >= dcomp + BL_SPACEDIM);
	BL_ASSERT(fc.size() == BL_SPACEDIM);
	BL_ASSERT(fc[0]->nComp() == 1);

	Real dx[3] = {1.0,1.0,1.0};
	Real problo[3] = {0.,0.,0.};
	int coord_type = 0;

#ifdef _OPENMP
#pragma omp parallel
#endif
	for (MFIter mfi(cc,true); mfi.isValid(); ++mfi) 
	{
	    const Box& bx = mfi.tilebox();

	    BL_FORT_PROC_CALL(BL_AVG_FC_TO_CC,bl_avg_fc_to_cc)
		(bx.loVect(), bx.hiVect(),
		 BL_TO_FORTRAN_N(cc[mfi],dcomp),
		 D_DECL(BL_TO_FORTRAN((*fc[0])[mfi]),
			BL_TO_FORTRAN((*fc[1])[mfi]),
			BL_TO_FORTRAN((*fc[2])[mfi])),
		 dx, problo, coord_type);
	}
    }
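A brief usage sketch of this pointer-vector overload (an assumption, not from the original source; grids is an assumed cell-centered BoxArray). Since no Geometry is passed, the unit dx, zero problo, and Cartesian coord_type hard-wired above are used:

    std::vector<MultiFab*> fc(BL_SPACEDIM);
    for (int i = 0; i < BL_SPACEDIM; ++i) {
        BoxArray fba = grids;
        fba.surroundingNodes(i);
        fc[i] = new MultiFab(fba, 1, 0);
    }
    MultiFab cc(grids, BL_SPACEDIM, 0);
    // ... fill each *fc[i] ...
    BoxLib::average_face_to_cellcenter(cc, 0, fc); // write into components 0..BL_SPACEDIM-1 of cc
    for (int i = 0; i < BL_SPACEDIM; ++i) delete fc[i];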
Example #3
Real
Adv::estTimeStep (Real)
{
    // This is just a dummy value to start with 
    Real dt_est  = 1.0e+20;

    const Real* dx = geom.CellSize();
    const Real* prob_lo = geom.ProbLo();
    const Real cur_time = state[State_Type].curTime();
    const MultiFab& S_new = get_new_data(State_Type);

#ifdef _OPENMP
#pragma omp parallel reduction(min:dt_est)
#endif
    {
	FArrayBox uface[BL_SPACEDIM];

	for (MFIter mfi(S_new, true); mfi.isValid(); ++mfi)
	{
	    for (int i = 0; i < BL_SPACEDIM ; i++) {
		const Box& bx = mfi.nodaltilebox(i);
		uface[i].resize(bx,1);
	    }

	    BL_FORT_PROC_CALL(GET_FACE_VELOCITY,get_face_velocity)
		(level, cur_time,
		 D_DECL(BL_TO_FORTRAN(uface[0]),
			BL_TO_FORTRAN(uface[1]),
			BL_TO_FORTRAN(uface[2])),
		 dx, prob_lo);

	    for (int i = 0; i < BL_SPACEDIM; ++i) {
		Real umax = uface[i].norm(0);
		if (umax > 1.e-100) {
		    dt_est = std::min(dt_est, dx[i] / umax);
		}
	    }
	}
    }

    ParallelDescriptor::ReduceRealMin(dt_est);
    dt_est *= cfl;

    if (verbose && ParallelDescriptor::IOProcessor())
	std::cout << "Adv::estTimeStep at level " << level << ":  dt_est = " << dt_est << std::endl;
    
    return dt_est;
}
Example #4
    void average_edge_to_cellcenter (MultiFab& cc, int dcomp, const std::vector<MultiFab*>& edge)
    {
	BL_ASSERT(cc.nComp() >= dcomp + BL_SPACEDIM);
	BL_ASSERT(edge.size() == BL_SPACEDIM);
	BL_ASSERT(edge[0]->nComp() == 1);
#ifdef _OPENMP
#pragma omp parallel
#endif
	for (MFIter mfi(cc,true); mfi.isValid(); ++mfi) 
	{
	    const Box& bx = mfi.tilebox();

	    BL_FORT_PROC_CALL(BL_AVG_EG_TO_CC,bl_avg_eg_to_cc)
		(bx.loVect(), bx.hiVect(),
		 BL_TO_FORTRAN_N(cc[mfi],dcomp),
		 D_DECL(BL_TO_FORTRAN((*edge[0])[mfi]),
			BL_TO_FORTRAN((*edge[1])[mfi]),
			BL_TO_FORTRAN((*edge[2])[mfi])));
	}	
    }
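A usage sketch (an assumption, not from the original source): each edge[i] must already be a one-component MultiFab defined on the edge-based BoxArray for direction i, and the averages land in cc starting at component dcomp:

    std::vector<MultiFab*> edge(BL_SPACEDIM);
    // ... define each edge[i] on the edge-based BoxArray for direction i and fill it ...
    MultiFab cc(grids, BL_SPACEDIM, 0);            // grids: an assumed cell-centered BoxArray
    BoxLib::average_edge_to_cellcenter(cc, 0, edge);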
Example #5
    void average_cellcenter_to_face (PArray<MultiFab>& fc, const MultiFab& cc, const Geometry& geom)
    {
	BL_ASSERT(cc.nComp() == 1);
	BL_ASSERT(cc.nGrow() >= 1);
	BL_ASSERT(fc.size() == BL_SPACEDIM);
	BL_ASSERT(fc[0].nComp() == 1); // We only expect fc to have the gradient perpendicular to the face

	const Real* dx     = geom.CellSize();
	const Real* problo = geom.ProbLo();
	int coord_type = Geometry::Coord();

#ifdef _OPENMP
#pragma omp parallel
#endif
	for (MFIter mfi(cc,true); mfi.isValid(); ++mfi) 
	{
	    const Box& xbx = mfi.nodaltilebox(0);
#if (BL_SPACEDIM > 1)
	    const Box& ybx = mfi.nodaltilebox(1);
#endif
#if (BL_SPACEDIM == 3)
	    const Box& zbx = mfi.nodaltilebox(2);
#endif
	    
	    BL_FORT_PROC_CALL(BL_AVG_CC_TO_FC,bl_avg_cc_to_fc)
		(xbx.loVect(), xbx.hiVect(),
#if (BL_SPACEDIM > 1)
		 ybx.loVect(), ybx.hiVect(),
#endif
#if (BL_SPACEDIM == 3)
		 zbx.loVect(), zbx.hiVect(),
#endif
		 D_DECL(BL_TO_FORTRAN(fc[0][mfi]),
			BL_TO_FORTRAN(fc[1][mfi]),
			BL_TO_FORTRAN(fc[2][mfi])),
		 BL_TO_FORTRAN(cc[mfi]),
		 dx, problo, coord_type);
	}
    }
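A usage sketch for the reverse, cell-to-face direction (an assumption, not from the original source). The assertion above requires the cell-centered source to carry at least one ghost cell, so its boundary should be filled, e.g. with FillBoundary, before the call:

    MultiFab cc(grids, 1, 1);                      // one component, one ghost cell (required above)
    // ... fill cc ...
    cc.FillBoundary(geom.periodicity());           // fill ghost cells on a periodic domain
    PArray<MultiFab> fc(BL_SPACEDIM, PArrayManage);
    for (int i = 0; i < BL_SPACEDIM; ++i) {
        BoxArray fba = grids;
        fba.surroundingNodes(i);
        fc.set(i, new MultiFab(fba, 1, 0));
    }
    BoxLib::average_cellcenter_to_face(fc, cc, geom);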
Example #6
void advance (MultiFab* old_phi, MultiFab* new_phi, Real* dx, Real dt, Geometry geom)
{
    int Ncomp = old_phi->nComp();

    // Fill the ghost cells of each grid from the other grids
    Real t1 = ParallelDescriptor::second();
    old_phi->FillBoundary(geom.periodicity());
    FB_time += ParallelDescriptor::second() - t1;

    Real t0 = ParallelDescriptor::second();
    
    if (do_tiling) {
#ifdef _OPENMP
#pragma omp parallel
#endif
	for ( MFIter mfi(*old_phi,true); mfi.isValid(); ++mfi )
	{
	    const Box& bx = mfi.tilebox();
	
	    advance_phi(bx.loVect(), bx.hiVect(),
			BL_TO_FORTRAN((*old_phi)[mfi]),
			BL_TO_FORTRAN((*new_phi)[mfi]),
			Ncomp,dx, dt);
	}
    } else {
	for ( MFIter mfi(*old_phi); mfi.isValid(); ++mfi )
	{
	    const Box& bx = mfi.validbox();
	
	    advance_phi2(bx.loVect(), bx.hiVect(),
			 BL_TO_FORTRAN((*old_phi)[mfi]),
			 BL_TO_FORTRAN((*new_phi)[mfi]),
			 Ncomp,dx, dt);
	}
    }
    
    kernel_time += ParallelDescriptor::second() - t0;
}
Example #7
    void average_down (MultiFab& S_fine, MultiFab& S_crse, const Geometry& fgeom, const Geometry& cgeom, 
                       int scomp, int ncomp, const IntVect& ratio)
    {
  
        if (S_fine.is_nodal() || S_crse.is_nodal())
        {
            BoxLib::Error("Can't use BoxLib::average_down for nodal MultiFab!");
        }

#if (BL_SPACEDIM == 3)
	BoxLib::average_down(S_fine, S_crse, scomp, ncomp, ratio);
	return;
#else

        BL_ASSERT(S_crse.nComp() == S_fine.nComp());

        //
        // Coarsen() the fine stuff on processors owning the fine data.
        //
	const BoxArray& fine_BA = S_fine.boxArray();
        BoxArray crse_S_fine_BA = fine_BA; 
	crse_S_fine_BA.coarsen(ratio);

        MultiFab crse_S_fine(crse_S_fine_BA,ncomp,0);

	MultiFab fvolume;
	fgeom.GetVolume(fvolume, fine_BA, 0);

#ifdef _OPENMP
#pragma omp parallel
#endif
        for (MFIter mfi(crse_S_fine,true); mfi.isValid(); ++mfi)
        {
            //  NOTE: The tilebox is defined at the coarse level.
            const Box& tbx = mfi.tilebox();

            //  NOTE: We copy from component scomp of the fine fab into component 0 of the crse fab
            //        because the crse fab is a temporary which was made starting at comp 0, it is
            //        not part of the actual crse multifab which came in.
            BL_FORT_PROC_CALL(BL_AVGDOWN_WITH_VOL,bl_avgdown_with_vol)
                (tbx.loVect(), tbx.hiVect(),
                 BL_TO_FORTRAN_N(S_fine[mfi],scomp),
                 BL_TO_FORTRAN_N(crse_S_fine[mfi],0),
                 BL_TO_FORTRAN(fvolume[mfi]),
                 ratio.getVect(),&ncomp);
	}

        S_crse.copy(crse_S_fine,0,scomp,ncomp);
#endif
   }
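A usage sketch (an assumption, not from the original source; S_fine, fgeom, and cgeom are assumed to exist): the coarse MultiFab is built on a coarsened copy of the fine BoxArray, matching the routine's requirement that the coarse grids be a coarsened version of the fine grids:

    IntVect ratio(D_DECL(2,2,2));                  // refinement ratio between the two levels
    BoxArray cba = S_fine.boxArray();
    cba.coarsen(ratio);
    MultiFab S_crse(cba, S_fine.nComp(), 0);
    BoxLib::average_down(S_fine, S_crse, fgeom, cgeom, 0, S_fine.nComp(), ratio);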
Example #8
    // Average fine face-based MultiFab onto crse face-based MultiFab.
    // This routine assumes that the crse BoxArray is a coarsened version of the fine BoxArray.
    void average_down_faces (PArray<MultiFab>& fine, PArray<MultiFab>& crse, IntVect& ratio)
    {
	BL_ASSERT(crse.size()  == BL_SPACEDIM);
	BL_ASSERT(fine.size()  == BL_SPACEDIM);
	BL_ASSERT(crse[0].nComp() == 1);
	BL_ASSERT(fine[0].nComp() == 1);

#ifdef _OPENMP
#pragma omp parallel
#endif
        for (int n=0; n<BL_SPACEDIM; ++n) {
            for (MFIter mfi(crse[n],true); mfi.isValid(); ++mfi)
            {
                const Box& tbx = mfi.tilebox();

                BL_FORT_PROC_CALL(BL_AVGDOWN_FACES,bl_avgdown_faces)
                    (tbx.loVect(),tbx.hiVect(),
                     BL_TO_FORTRAN(fine[n][mfi]),
                     BL_TO_FORTRAN(crse[n][mfi]),
                     ratio.getVect(),n);
            }
        }
    }
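A usage sketch (an assumption, not from the original source; fgrids is an assumed fine cell-centered BoxArray). Both sets of face MultiFabs are built from the same cell-centered grids, the coarse set from a coarsened copy, which is exactly the relationship this routine assumes:

    IntVect ratio(D_DECL(2,2,2));
    BoxArray cgrids = fgrids;
    cgrids.coarsen(ratio);
    PArray<MultiFab> fine(BL_SPACEDIM, PArrayManage);
    PArray<MultiFab> crse(BL_SPACEDIM, PArrayManage);
    for (int i = 0; i < BL_SPACEDIM; ++i) {
        BoxArray fba = fgrids;  fba.surroundingNodes(i);
        BoxArray cba = cgrids;  cba.surroundingNodes(i);
        fine.set(i, new MultiFab(fba, 1, 0));
        crse.set(i, new MultiFab(cba, 1, 0));
    }
    // ... fill each fine[i] ...
    BoxLib::average_down_faces(fine, crse, ratio);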
Example #9
Real
Adv::advance (Real time,
              Real dt,
              int  iteration,
              int  ncycle)
{
    for (int k = 0; k < NUM_STATE_TYPE; k++) {
        state[k].allocOldData();
        state[k].swapTimeLevels(dt);
    }

    MultiFab& S_new = get_new_data(State_Type);

    const Real prev_time = state[State_Type].prevTime();
    const Real cur_time = state[State_Type].curTime();
    const Real ctr_time = 0.5*(prev_time + cur_time);

    const Real* dx = geom.CellSize();
    const Real* prob_lo = geom.ProbLo();

    //
    // Get pointers to Flux registers, or set pointer to zero if not there.
    //
    FluxRegister *fine    = 0;
    FluxRegister *current = 0;
    
    int finest_level = parent->finestLevel();

    if (do_reflux && level < finest_level) {
	fine = &getFluxReg(level+1);
	fine->setVal(0.0);
    }

    if (do_reflux && level > 0) {
	current = &getFluxReg(level);
    }

    MultiFab fluxes[BL_SPACEDIM];

    if (do_reflux)
    {
	for (int j = 0; j < BL_SPACEDIM; j++)
	{
	    BoxArray ba = S_new.boxArray();
	    ba.surroundingNodes(j);
	    fluxes[j].define(ba, NUM_STATE, 0, Fab_allocate);
	}
    }

    // State with ghost cells
    MultiFab Sborder(grids, NUM_STATE, NUM_GROW);
    FillPatch(*this, Sborder, NUM_GROW, time, State_Type, 0, NUM_STATE);

#ifdef _OPENMP
#pragma omp parallel
#endif
    {
	FArrayBox flux[BL_SPACEDIM], uface[BL_SPACEDIM];

	for (MFIter mfi(S_new, true); mfi.isValid(); ++mfi)
	{
	    const Box& bx = mfi.tilebox();

	    const FArrayBox& statein = Sborder[mfi];
	    FArrayBox& stateout      =   S_new[mfi];

	    // Allocate fabs for fluxes and Godunov velocities.
	    for (int i = 0; i < BL_SPACEDIM ; i++) {
		const Box& bxtmp = BoxLib::surroundingNodes(bx,i);
		flux[i].resize(bxtmp,NUM_STATE);
		uface[i].resize(BoxLib::grow(bxtmp,1),1);
	    }

	    get_face_velocity(level, ctr_time,
			      D_DECL(BL_TO_FORTRAN(uface[0]),
				     BL_TO_FORTRAN(uface[1]),
				     BL_TO_FORTRAN(uface[2])),
			      dx, prob_lo);

            advect(time, bx.loVect(), bx.hiVect(),
		   BL_TO_FORTRAN_3D(statein), 
		   BL_TO_FORTRAN_3D(stateout),
		   D_DECL(BL_TO_FORTRAN_3D(uface[0]),
			  BL_TO_FORTRAN_3D(uface[1]),
			  BL_TO_FORTRAN_3D(uface[2])),
		   D_DECL(BL_TO_FORTRAN_3D(flux[0]), 
			  BL_TO_FORTRAN_3D(flux[1]), 
			  BL_TO_FORTRAN_3D(flux[2])), 
		   dx, dt);

	    if (do_reflux) {
		for (int i = 0; i < BL_SPACEDIM ; i++)
		    fluxes[i][mfi].copy(flux[i],mfi.nodaltilebox(i));	  
	    }
	}
    }

    if (do_reflux) {
	if (current) {
	    for (int i = 0; i < BL_SPACEDIM ; i++)
		current->FineAdd(fluxes[i],i,0,0,NUM_STATE,1.);
	}
	if (fine) {
	    for (int i = 0; i < BL_SPACEDIM ; i++)
		fine->CrseInit(fluxes[i],i,0,0,NUM_STATE,-1.);
	}
    }

    return dt;
}
Example #10
int
main (int argc, char* argv[])
{
    BoxLib::Initialize(argc,argv);

    // What time is it now?  We'll use this to compute total run time.
    Real strt_time = ParallelDescriptor::second();

    std::cout << std::setprecision(15);

    // ParmParse is way of reading inputs from the inputs file
    ParmParse pp;
    
    int verbose = 0;
    pp.query("verbose", verbose);
    
    // We need to get n_cell from the inputs file - this is the number of cells on each side of 
    //   a square (or cubic) domain.
    int n_cell;
    pp.get("n_cell",n_cell);

    int max_grid_size;
    pp.get("max_grid_size",max_grid_size);

    // Default plot_int to 1, allow us to set it to something else in the inputs file
    //  If plot_int < 0 then no plot files will be written
    int plot_int = 1;
    pp.query("plot_int",plot_int);

    // Default nsteps to 0, allow us to set it to something else in the inputs file
    int nsteps   = 0;
    pp.query("nsteps",nsteps);

    pp.query("do_tiling", do_tiling);

    // Define a single box covering the domain
    IntVect dom_lo(0,0,0);
    IntVect dom_hi(n_cell-1,n_cell-1,n_cell-1);
    Box domain(dom_lo,dom_hi);

    // Initialize the boxarray "bs" from the single box "bx"
    BoxArray bs(domain);

    // Break up boxarray "bs" into chunks no larger than "max_grid_size" along a direction
    bs.maxSize(max_grid_size);

    // This defines the physical size of the box.  Right now the box is [-1,1] in each direction.
    RealBox real_box;
    for (int n = 0; n < BL_SPACEDIM; n++) {
	real_box.setLo(n,-1.0);
	real_box.setHi(n, 1.0);
    }

    // This says we are using Cartesian coordinates
    int coord = 0;
    
    // This sets the boundary conditions to be doubly or triply periodic
    int is_per[BL_SPACEDIM];
    for (int i = 0; i < BL_SPACEDIM; i++) is_per[i] = 1; 
    
    // This defines a Geometry object which is useful for writing the plotfiles  
    Geometry geom(domain,&real_box,coord,is_per);

    // This defines the mesh spacing
    Real dx[BL_SPACEDIM];
    for ( int n=0; n<BL_SPACEDIM; n++ )
	dx[n] = ( geom.ProbHi(n) - geom.ProbLo(n) )/domain.length(n);

    // Nghost = number of ghost cells for each array 
    int Nghost = 1;

    // Ncomp = number of components for each array
    int Ncomp  = 1;
    pp.query("ncomp", Ncomp);

    // Allocate space for the old_phi and new_phi -- we define old_phi and new_phi as
    PArray < MultiFab > phis(2, PArrayManage);
    phis.set(0, new MultiFab(bs, Ncomp, Nghost));
    phis.set(1, new MultiFab(bs, Ncomp, Nghost));
    MultiFab* old_phi = &phis[0];
    MultiFab* new_phi = &phis[1];

    // Initialize both to zero (just because)
    old_phi->setVal(0.0);
    new_phi->setVal(0.0);

    // Initialize phi by calling a Fortran routine.
    // MFIter = MultiFab Iterator
#ifdef _OPENMP
#pragma omp parallel
#endif
    for ( MFIter mfi(*new_phi,true); mfi.isValid(); ++mfi )
    {
	const Box& bx = mfi.tilebox();

	init_phi(bx.loVect(),bx.hiVect(), 
		 BL_TO_FORTRAN((*new_phi)[mfi]),Ncomp,
		 dx,geom.ProbLo(),geom.ProbHi());
    }

    // Call the compute_dt routine to return a time step which we will pass to advance
    Real dt = compute_dt(dx[0]);

    // Write a plotfile of the initial data if plot_int > 0 (plot_int was defined in the inputs file)
    if (plot_int > 0) {
	int n = 0;
	const std::string& pltfile = BoxLib::Concatenate("plt",n,5);
	writePlotFile(pltfile, *new_phi, geom);
    }

    Real adv_start_time = ParallelDescriptor::second();

    for (int n = 1; n <= nsteps; n++)
    {
	// Swap the pointers so we don't have to allocate and de-allocate data
	std::swap(old_phi, new_phi);

	// new_phi = old_phi + dt * (something)
	advance(old_phi, new_phi, dx, dt, geom); 

	// Tell the I/O Processor to write out which step we're doing
	if (verbose && ParallelDescriptor::IOProcessor())
	    std::cout << "Advanced step " << n << std::endl;

	// Write a plotfile of the current data (plot_int was defined in the inputs file)
	if (plot_int > 0 && n%plot_int == 0) {
	    const std::string& pltfile = BoxLib::Concatenate("plt",n,5);
	    writePlotFile(pltfile, *new_phi, geom);
	}
    }

    // Call the timer again and compute the maximum difference between the start time and stop time
    //   over all processors
    Real advance_time = ParallelDescriptor::second() - adv_start_time;
    Real stop_time = ParallelDescriptor::second() - strt_time;
    const int IOProc = ParallelDescriptor::IOProcessorNumber();
    ParallelDescriptor::ReduceRealMax(stop_time,IOProc);
    ParallelDescriptor::ReduceRealMax(advance_time,IOProc);
    ParallelDescriptor::ReduceRealMax(kernel_time,IOProc);
    ParallelDescriptor::ReduceRealMax(FB_time,IOProc);
    
    // Tell the I/O Processor to write out the "run time"
    if (ParallelDescriptor::IOProcessor()) {
	std::cout << "Kernel    time = " << kernel_time << std::endl;
	std::cout << "FB        time = " << FB_time << std::endl;
	std::cout << "Advance   time = " << advance_time << std::endl;
	std::cout << "Total run time = " << stop_time << std::endl;
    }
    
    // Say goodbye to MPI, etc...
    BoxLib::Finalize();
}