// -----------------------------------------------------------------------------
// -----------------------------------------------------------------------------
void MappedLevelFluxRegister::reflux (LevelData<FArrayBox>&       a_levelDiv,
                                      const LevelData<FArrayBox>& a_crseCCJinv,
                                      const Real                  a_scale)
{
    CH_assert(a_levelDiv.getBoxes() == a_crseCCJinv.getBoxes());

    const Interval& interv = a_levelDiv.interval();
    this->reflux(a_levelDiv, a_scale, interv, interv, a_crseCCJinv);
}
void initData(LevelData<FArrayBox>& a_data, const Real a_dx)
{
    DataIterator dit = a_data.dataIterator();
    const DisjointBoxLayout& interiorBoxes = a_data.getBoxes();

    for (dit.begin(); dit.ok(); ++dit)
    {
        // first set to a bogus value which will persist in ghost cells
        // after initialization
        a_data[dit()].setVal(1.0e9);

        // this will be slow, but who cares?
        FArrayBox& localData = a_data[dit()];
        BoxIterator boxIt(interiorBoxes[dit()]);
        Real localVal;
        for (boxIt.begin(); boxIt.ok(); ++boxIt)
        {
            const IntVect& loc = boxIt();
            for (int comp = 0; comp < localData.nComp(); comp++)
            {
                localVal = dataVal(loc, comp);
                localData(loc, comp) = localVal;
            }
        }
    }
}
int scopingTest()
{
    Vector<Box> boxes;
    int retflag = 0;
    setGrids(boxes);

    LevelData<FArrayBox> levelFab;
    makeLevelData(boxes, levelFab);

    const DisjointBoxLayout& dbl = levelFab.getBoxes();
    DataIterator dit = dbl.dataIterator();
    int ivec = 0;
    for (dit.begin(); dit.ok(); ++dit)
    {
        if (!dbl.check(dit()))
        {
            if (verbose)
                pout() << indent2 << pgmname
                       << ": failed at box " << ivec << endl;
            retflag = 1;
        }
        else
        {
            levelFab[dit].setVal(0.);
        }
        if (retflag > 0) break;
        ivec++;
    }
    return retflag;
}
// ------------------------------------------------------------
// version of 27 March 2003:  no finer level
Real integral(const LevelData<NodeFArrayBox>& a_phi,
              const ProblemDomain& a_domain,
              const LayoutData< Vector<IntVectSet> >& a_IVSVext,
              const Real a_dx,
              const Interval a_comps,
              bool a_verbose)
{
    // Idea:  copy a_phi to temp, then zero out temp on
    // exterior nodes of grids at this level.
    int ncomps = a_comps.size();
    const DisjointBoxLayout& grids = a_phi.getBoxes();
    LevelData<NodeFArrayBox> temp(grids, ncomps);

    // Copy a_phi to temp.
    Interval newcomps(0, ncomps-1);
    for (DataIterator dit(grids.dataIterator()); dit.ok(); ++dit)
    {
        const NodeFArrayBox& nfab = a_phi[dit()];
        const Box& bx = grids.get(dit());
        temp[dit()].copy(bx, newcomps, bx, nfab, a_comps);
    }

    // Zero out temp on exterior nodes.
    zeroBoundaryNodes(temp, a_IVSVext);

    Real integralLevel = integral(temp, a_dx, newcomps, a_verbose);

    return integralLevel;
}
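// Hedged usage sketch (not part of the original source): how a caller with no
// finer level might drive the integral() overload above. It assumes the
// interiorBoundaryNodes/exteriorBoundaryNodes signatures used by the norm()
// wrapper later in this section; the function name is hypothetical.
static Real integralNoFinerSketch(const LevelData<NodeFArrayBox>& a_phi,
                                  const ProblemDomain&            a_domain,
                                  const Real                      a_dx)
{
    const DisjointBoxLayout& grids = a_phi.getBoxes();

    // Build the exterior-boundary-node sets for this level's grids.
    LayoutData< Vector<IntVectSet> > IVSVint, IVSVext;
    interiorBoundaryNodes(IVSVint, grids, a_domain);
    exteriorBoundaryNodes(IVSVext, IVSVint, grids);

    // Integrate all components, non-verbose.
    return integral(a_phi, a_domain, IVSVext, a_dx, a_phi.interval(), false);
}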
// -----------------------------------------------------------------------------
// Semi-implicitly handles the gravity forcing and projection.
// The old* inputs should be the values at t^n.
// The new* inputs should be the updated values from the TGA solver.
// -----------------------------------------------------------------------------
void AMRNavierStokes::doCCIGProjection (LevelData<FArrayBox>&       a_newVel,
                                        LevelData<FArrayBox>&       a_newB,
                                        const LevelData<FArrayBox>& a_oldVel,
                                        const LevelData<FArrayBox>& a_oldB,
                                        const LevelData<FluxBox>&   a_advVel,
                                        const Real                  a_oldTime,
                                        const Real                  a_dt,
                                        const bool                  a_doProj)
{
    CH_TIME("AMRNavierStokes::doCCIGProjection");

    const Real halfTime  = a_oldTime + 0.5 * a_dt;
    const Real newTime   = a_oldTime + 1.0 * a_dt;
    const Real dummyTime = -1.0e300;

    const GeoSourceInterface& geoSource = *(m_levGeoPtr->getGeoSourcePtr());
    const RealVect& dx = m_levGeoPtr->getDx();
    const DisjointBoxLayout& grids = a_newVel.getBoxes();
    DataIterator dit = grids.dataIterator();

    // 1. Compute the background buoyancy, N, Dinv, etc...

    // Fill the FC background buoyancy field.
    LevelData<FluxBox> bbar(grids, 1, 2*IntVect::Unit);
    for (dit.reset(); dit.ok(); ++dit)
    {
        FluxBox& bbarFB = bbar[dit];
        D_TERM(m_physBCPtr->setBackgroundScalar(bbarFB[0], 0, *m_levGeoPtr, dit(), dummyTime);,
               m_physBCPtr->setBackgroundScalar(bbarFB[1], 0, *m_levGeoPtr, dit(), dummyTime);,
// ---------------------------------------------------------
void AMRNavierStokes::computeLapVel(LevelData<FArrayBox>& a_lapVel,
                                    LevelData<FArrayBox>& a_vel,
                                    const LevelData<FArrayBox>* a_crseVelPtr)
{
    // set BC's
    VelBCHolder velBC(m_physBCPtr->viscousVelFuncBC());
    bool isHomogeneous = false;
    m_velocityAMRPoissonOp.applyOp(a_lapVel, a_vel, a_crseVelPtr,
                                   isHomogeneous, velBC);

    // may need to extend lapVel to cover ghost cells as well
    {
        BCHolder viscBC = m_physBCPtr->viscousFuncBC();
        const DisjointBoxLayout& grids = a_lapVel.getBoxes();
        DataIterator dit = a_lapVel.dataIterator();
        for (dit.reset(); dit.ok(); ++dit)
        {
            viscBC(a_lapVel[dit], grids[dit],
                   m_problem_domain, m_dx,
                   false); // not homogeneous
        }
    }

    // finally, do exchange
    a_lapVel.exchange(a_lapVel.interval());
}
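// Hedged usage sketch (not from the original source); lapVel, grids, and
// crseVelPtr are hypothetical caller-side names. The coarse pointer is simply
// NULL on the base level, in which case applyOp skips the coarse-fine
// interpolation:
//
//   LevelData<FArrayBox> lapVel(grids, SpaceDim, IntVect::Unit);
//   computeLapVel(lapVel, vel, crseVelPtr);   // crseVelPtr == NULL on level 0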
// ---------------------------------------------------------
void AMRNavierStokes::computeLapScal(LevelData<FArrayBox>& a_lapScal,
                                     LevelData<FArrayBox>& a_scal,
                                     const BCHolder& a_physBC,
                                     const LevelData<FArrayBox>* a_crseScalPtr)
{
    m_scalarsAMRPoissonOp.setBC(a_physBC);
    bool isHomogeneous = false;
    if (a_crseScalPtr != NULL)
    {
        m_scalarsAMRPoissonOp.AMROperatorNF(a_lapScal, a_scal, *a_crseScalPtr,
                                            isHomogeneous);
    }
    else
    {
        m_scalarsAMRPoissonOp.applyOpI(a_lapScal, a_scal, isHomogeneous);
    }

    BCHolder viscBC = m_physBCPtr->viscousFuncBC();

    DataIterator dit = a_lapScal.dataIterator();
    const DisjointBoxLayout& grids = a_lapScal.getBoxes();
    for (dit.reset(); dit.ok(); ++dit)
    {
        viscBC(a_lapScal[dit], grids[dit],
               m_problem_domain, m_dx,
               false); // not homogeneous
    }

    // finally, do exchange
    a_lapScal.exchange(a_lapScal.interval());
}
// -----------------------------------------------------------------------------
// Adds coarse cell values directly to all overlying fine cells.
// -----------------------------------------------------------------------------
void ConstInterpPS::prolongIncrement (LevelData<FArrayBox>&       a_phiThisLevel,
                                      const LevelData<FArrayBox>& a_correctCoarse)
{
    CH_TIME("ConstInterpPS::prolongIncrement");

    // Gather grids, domains, refinement ratios...
    const DisjointBoxLayout& fineGrids = a_phiThisLevel.getBoxes();
    const DisjointBoxLayout& crseGrids = a_correctCoarse.getBoxes();
    CH_assert(fineGrids.compatible(crseGrids));

    const ProblemDomain& fineDomain = fineGrids.physDomain();
    const ProblemDomain& crseDomain = crseGrids.physDomain();
    const IntVect mgRefRatio = fineDomain.size() / crseDomain.size();
    CH_assert(mgRefRatio.product() > 1);

    DataIterator dit = fineGrids.dataIterator();
    for (dit.reset(); dit.ok(); ++dit)
    {
        // Create references for convenience
        FArrayBox&       fineFAB   = a_phiThisLevel[dit];
        const FArrayBox& crseFAB   = a_correctCoarse[dit];
        const Box&       fineValid = fineGrids[dit];

        // To make things easier, we will offset the
        // coarse and fine data boxes to zero.
        const IntVect& fiv = fineValid.smallEnd();
        const IntVect  civ = coarsen(fiv, mgRefRatio);

        // Correct the fine data
        FORT_CONSTINTERPPS (
            CHF_FRA_SHIFT(fineFAB, fiv),
            CHF_CONST_FRA_SHIFT(crseFAB, civ),
            CHF_BOX_SHIFT(fineValid, fiv),
            CHF_CONST_INTVECT(mgRefRatio));
    }
}
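// Hedged illustration (not the actual Fortran kernel): a plain C++ version of
// the piecewise-constant prolongation that FORT_CONSTINTERPPS is expected to
// perform, written in unshifted index space. The function name is hypothetical;
// the arguments correspond to the fineFAB/crseFAB/fineValid/mgRefRatio loop
// variables above.
static void constInterpSketch(FArrayBox&       a_fine,
                              const FArrayBox& a_crse,
                              const Box&       a_fineValid,
                              const IntVect&   a_refRatio)
{
    for (BoxIterator bit(a_fineValid); bit.ok(); ++bit)
    {
        // Each fine cell is incremented by the coarse cell underlying it.
        const IntVect& fineIV = bit();
        const IntVect  crseIV = coarsen(fineIV, a_refRatio);
        for (int comp = 0; comp < a_fine.nComp(); ++comp)
        {
            a_fine(fineIV, comp) += a_crse(crseIV, comp);
        }
    }
}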
// ------------------------------------------------------------
// version of 27 March 2003
Real norm(const LevelData<NodeFArrayBox>& a_phi,
          const ProblemDomain& a_domain,
          const DisjointBoxLayout& a_finerGridsCoarsened,
          const LayoutData< Vector<Box> >& a_IVSVext,
          const LayoutData< Vector<Box> >& a_IVSVintFinerCoarsened,
          const int a_nRefFine,
          const Real a_dx,
          const Interval& a_comps,
          const int a_p,
          bool a_verbose)
{
    // Idea:  copy a_phi to temp, then zero out temp on:
    // - exterior nodes of grids at this level;
    // - projections of interior nodes of the finer grids.
    int ncomps = a_comps.size();
    const DisjointBoxLayout& grids = a_phi.getBoxes();
    LevelData<NodeFArrayBox> temp(grids, ncomps);

    // Copy a_phi to temp.
    Interval newcomps(0, ncomps-1);
    for (DataIterator dit(grids.dataIterator()); dit.ok(); ++dit)
    {
        const NodeFArrayBox& nfab = a_phi[dit()];
        const Box& bx = grids.get(dit());
        temp[dit()].copy(bx, newcomps, bx, nfab, a_comps);
    }

    // Zero out temp on exterior nodes.
    zeroBoundaryNodes(temp, a_IVSVext);

    // Define zeroCoarsened to be all zero on the coarsened finer grids.
    LevelData<NodeFArrayBox> zeroCoarsened(a_finerGridsCoarsened, ncomps, IntVect::Zero);
    for (DataIterator dit(a_finerGridsCoarsened.dataIterator()); dit.ok(); ++dit)
        zeroCoarsened[dit()].getFab().setVal(0.);

    // Set temp to zero on interior nodes of coarsened finer grids.
    copyInteriorNodes(temp, zeroCoarsened, a_IVSVintFinerCoarsened);

    Real normLevel = norm(temp, a_dx, a_p, newcomps, a_verbose);

    return normLevel;
}
// ------------------------------------------------------------
// version of 27 March 2003
Real norm(const LevelData<NodeFArrayBox>& a_phi,
          const ProblemDomain& a_domain,
          const DisjointBoxLayout* a_finerGridsPtr,
          const int a_nRefFine,
          const Real a_dx,
          const Interval& a_comps,
          const int a_p,
          bool a_verbose)
{
    const DisjointBoxLayout& grids = a_phi.getBoxes();

    LayoutData< Vector<IntVectSet> > IVSVint, IVSVext;
    // LayoutData< Vector<Box> > IVSVint, IVSVext;
    interiorBoundaryNodes(IVSVint, grids, a_domain);
    exteriorBoundaryNodes(IVSVext, IVSVint, grids);

    Real normLevel = 0.;
    if (a_finerGridsPtr == NULL)
    {
        normLevel = norm(a_phi, a_domain, IVSVext,
                         a_dx, a_comps, a_p, a_verbose);
    }
    else
    {
        DisjointBoxLayout coarsenedFinerGrids;
        coarsen(coarsenedFinerGrids, *a_finerGridsPtr, a_nRefFine);

        LayoutData< Vector<IntVectSet> > IVSVintFinerCoarsened;
        // LayoutData< Vector<Box> > IVSVintFinerCoarsened;
        interiorBoundaryNodes(IVSVintFinerCoarsened, grids,
                              coarsenedFinerGrids, a_domain);

        normLevel = norm(a_phi, a_domain, coarsenedFinerGrids,
                         IVSVext, IVSVintFinerCoarsened,
                         a_nRefFine, a_dx, a_comps, a_p, a_verbose);
    }
    return normLevel;
}
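// Hedged usage sketch (not from the original source); phi, domain, finerGrids,
// nRefFine, and dx are hypothetical caller-side names. Passing NULL for the
// finer grids gives the single-level norm:
//
//   Real levelNorm = norm(phi, domain, &finerGrids, nRefFine, dx,
//                         phi.interval(), 2 /* a_p */, false /* verbose */);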
void initData(LevelData<FArrayBox>& a_data, const Real a_dx)
{
    DataIterator dit = a_data.dataIterator();
    const DisjointBoxLayout& interiorBoxes = a_data.getBoxes();

    for (dit.begin(); dit.ok(); ++dit)
    {
        // first set to a bogus value which will persist in ghost cells
        // after initialization
        a_data[dit()].setVal(1.0e9);

        // this will be slow, but who cares?
        FArrayBox& localData = a_data[dit()];
        BoxIterator boxIt(interiorBoxes[dit()]);
        Real localVal;
        for (boxIt.begin(); boxIt.ok(); ++boxIt)
        {
            const IntVect& loc = boxIt();
            // linear profile
            //localVal = a_dx*(D_TERM(loc[0]+0.5,
            //                        + loc[1]+0.5,
            //                        + loc[2]+0.5));

            // quadratic profile
            localVal = a_dx*a_dx*(D_TERM6((loc[0]+0.5)*(loc[0]+0.5),
                                          + (loc[1]+0.5)*(loc[1]+0.5),
                                          + (loc[2]+0.5)*(loc[2]+0.5),
                                          + (loc[3]+0.5)*(loc[3]+0.5),
                                          + (loc[4]+0.5)*(loc[4]+0.5),
                                          + (loc[5]+0.5)*(loc[5]+0.5)));
            localData(loc) = localVal;
        }
    }
}
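// Note (added for clarity, not in the original): with cell centers at
// x_d = a_dx*(i_d + 0.5), the quadratic profile in initData above evaluates
// |x_cc|^2 = sum_d x_d^2, the D_TERM6 macro keeping only the first
// CH_SPACEDIM terms of the sum.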
// -----------------------------------------------------------------------------
// Adds coarse cell values directly to all overlying fine cells,
// then removes the average from the fine result.
// -----------------------------------------------------------------------------
void ZeroAvgConstInterpPS::prolongIncrement (LevelData<FArrayBox>&       a_phiThisLevel,
                                             const LevelData<FArrayBox>& a_correctCoarse)
{
    CH_TIME("ZeroAvgConstInterpPS::prolongIncrement");

    // Gather grids, domains, refinement ratios...
    const DisjointBoxLayout& fineGrids = a_phiThisLevel.getBoxes();
    const DisjointBoxLayout& crseGrids = a_correctCoarse.getBoxes();
    CH_assert(fineGrids.compatible(crseGrids));

    const ProblemDomain& fineDomain = fineGrids.physDomain();
    const ProblemDomain& crseDomain = crseGrids.physDomain();
    const IntVect mgRefRatio = fineDomain.size() / crseDomain.size();
    CH_assert(mgRefRatio.product() > 1);

    // These will accumulate averaging data.
    Real localSum = 0.0;
    Real localVol = 0.0;
    CH_assert(!m_CCJinvPtr.isNull());
    CH_assert(m_dxProduct > 0.0);

    DataIterator dit = fineGrids.dataIterator();
    for (dit.reset(); dit.ok(); ++dit)
    {
        // Create references for convenience
        FArrayBox&       fineFAB   = a_phiThisLevel[dit];
        const FArrayBox& crseFAB   = a_correctCoarse[dit];
        const Box&       fineValid = fineGrids[dit];
        const FArrayBox& JinvFAB   = (*m_CCJinvPtr)[dit];

        // To make things easier, we will offset the
        // coarse and fine data boxes to zero.
        const IntVect& fiv = fineValid.smallEnd();
        const IntVect  civ = coarsen(fiv, mgRefRatio);

        // Correct the fine data
        FORT_CONSTINTERPWITHAVGPS (
            CHF_FRA_SHIFT(fineFAB, fiv),
            CHF_CONST_FRA_SHIFT(crseFAB, civ),
            CHF_BOX_SHIFT(fineValid, fiv),
            CHF_CONST_INTVECT(mgRefRatio),
            CHF_CONST_FRA1_SHIFT(JinvFAB, 0, fiv),
            CHF_CONST_REAL(m_dxProduct),
            CHF_REAL(localVol),
            CHF_REAL(localSum));
    }

    // Compute global sum (this is where the MPI communication happens)
#ifdef CH_MPI
    Real globalSum = 0.0;
    int result = MPI_Allreduce(&localSum, &globalSum, 1, MPI_CH_REAL,
                               MPI_SUM, Chombo_MPI::comm);
    if (result != MPI_SUCCESS)
    {
        MayDay::Error("Sorry, but I had a communication error in ZeroAvgConstInterpPS::prolongIncrement");
    }

    Real globalVol = 0.0;
    result = MPI_Allreduce(&localVol, &globalVol, 1, MPI_CH_REAL,
                           MPI_SUM, Chombo_MPI::comm);
    if (result != MPI_SUCCESS)
    {
        MayDay::Error("Sorry, but I had a communication error in ZeroAvgConstInterpPS::prolongIncrement");
    }
#else
    Real globalSum = localSum;
    Real globalVol = localVol;
#endif

    // Remove the average from phi.
    Real avgPhi = globalSum / globalVol;
    for (dit.reset(); dit.ok(); ++dit)
    {
        a_phiThisLevel[dit] -= avgPhi;
    }
}
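// A possible alternative (not in the original source): the two Allreduce calls
// above could be fused into one by packing localSum and localVol into a single
// length-2 buffer, halving the number of collectives. Sketch only:
//
//   Real localVals[2]  = { localSum, localVol };
//   Real globalVals[2] = { 0.0, 0.0 };
//   int result = MPI_Allreduce(localVals, globalVals, 2, MPI_CH_REAL,
//                              MPI_SUM, Chombo_MPI::comm);
//   if (result != MPI_SUCCESS)
//   {
//       MayDay::Error("communication error in ZeroAvgConstInterpPS::prolongIncrement");
//   }
//   Real globalSum = globalVals[0];
//   Real globalVol = globalVals[1];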
void VCAMRPoissonOp2::reflux(const LevelData<FArrayBox>&        a_phiFine,
                             const LevelData<FArrayBox>&        a_phi,
                             LevelData<FArrayBox>&              a_residual,
                             AMRLevelOp<LevelData<FArrayBox> >* a_finerOp)
{
    CH_TIME("VCAMRPoissonOp2::reflux");

    int ncomp = 1;
    ProblemDomain fineDomain = refine(m_domain, m_refToFiner);
    LevelFluxRegister levfluxreg(a_phiFine.disjointBoxLayout(),
                                 a_phi.disjointBoxLayout(),
                                 fineDomain,
                                 m_refToFiner,
                                 ncomp);

    levfluxreg.setToZero();
    Interval interv(0, a_phi.nComp()-1);

    DataIterator dit = a_phi.dataIterator();
    for (dit.reset(); dit.ok(); ++dit)
    {
        const FArrayBox& coarfab   = a_phi[dit];
        const FluxBox&   coarBCoef = (*m_bCoef)[dit];
        const Box&       gridBox   = a_phi.getBoxes()[dit];

        for (int idir = 0; idir < SpaceDim; idir++)
        {
            FArrayBox coarflux;
            Box faceBox = surroundingNodes(gridBox, idir);
            getFlux(coarflux, coarfab, coarBCoef, faceBox, idir);

            Real scale = 1.0;
            levfluxreg.incrementCoarse(coarflux, scale, dit(),
                                       interv, interv, idir);
        }
    }

    LevelData<FArrayBox>& p = (LevelData<FArrayBox>&)a_phiFine;

    // has to be its own object because the finer operator
    // owns an interpolator and we have no way of getting to it
    VCAMRPoissonOp2* finerAMRPOp = (VCAMRPoissonOp2*)a_finerOp;
    QuadCFInterp& quadCFI = finerAMRPOp->m_interpWithCoarser;

    quadCFI.coarseFineInterp(p, a_phi);
    // p.exchange(a_phiFine.interval());   // BVS is pretty sure this is not necessary.
    IntVect phiGhost = p.ghostVect();

    DataIterator ditf = a_phiFine.dataIterator();
    const DisjointBoxLayout& dblFine = a_phiFine.disjointBoxLayout();
    for (ditf.reset(); ditf.ok(); ++ditf)
    {
        const FArrayBox& phifFab   = a_phiFine[ditf];
        const FluxBox&   fineBCoef = (*(finerAMRPOp->m_bCoef))[ditf];
        const Box&       gridbox   = dblFine.get(ditf());

        for (int idir = 0; idir < SpaceDim; idir++)
        {
            int normalGhost = phiGhost[idir];
            SideIterator sit;
            for (sit.begin(); sit.ok(); sit.next())
            {
                Side::LoHiSide hiorlo = sit();
                Box fabbox;
                Box facebox;

                // assumption here that the stencil required
                // to compute the flux in the normal direction
                // is 2x the number of ghost cells for phi
                // (which is a reasonable assumption, and probably
                // better than just assuming you need one cell on
                // either side of the interface)
                // (dfm 8-4-06)
                if (sit() == Side::Lo)
                {
                    fabbox = adjCellLo(gridbox, idir, 2*normalGhost);
                    fabbox.shift(idir, 1);
                    facebox = bdryLo(gridbox, idir, 1);
                }
                else
                {
                    fabbox = adjCellHi(gridbox, idir, 2*normalGhost);
                    fabbox.shift(idir, -1);
                    facebox = bdryHi(gridbox, idir, 1);
                }

                // just in case we need ghost cells in the transverse direction
                // (dfm 8-4-06)
                for (int otherDir = 0; otherDir < SpaceDim; ++otherDir)
                {
                    if (otherDir != idir)
                    {
                        fabbox.grow(otherDir, phiGhost[otherDir]);
                    }
                }
                CH_assert(!fabbox.isEmpty());

                FArrayBox phifab(fabbox, a_phi.nComp());
                phifab.copy(phifFab);

                FArrayBox fineflux;
                getFlux(fineflux, phifab, fineBCoef, facebox, idir, m_refToFiner);

                Real scale = 1.0;
                levfluxreg.incrementFine(fineflux, scale, ditf(),
                                         interv, interv, idir, hiorlo);
            }
        }
    }

    Real scale = 1.0/m_dx;
    levfluxreg.reflux(a_residual, scale);
}
//
// VCAMRPoissonOp2::reflux()
//   There are currently the new version (first) and the old version (second)
//   in this file. Brian asked to preserve the old version in this way for
//   now. - TJL (12/10/2007)
//
void VCAMRPoissonOp2::reflux(const LevelData<FArrayBox>&        a_phiFine,
                             const LevelData<FArrayBox>&        a_phi,
                             LevelData<FArrayBox>&              a_residual,
                             AMRLevelOp<LevelData<FArrayBox> >* a_finerOp)
{
    CH_TIMERS("VCAMRPoissonOp2::reflux");

    m_levfluxreg.setToZero();
    Interval interv(0, a_phi.nComp()-1);

    CH_TIMER("VCAMRPoissonOp2::reflux::incrementCoarse", t2);
    CH_START(t2);

    DataIterator dit = a_phi.dataIterator();
    for (dit.reset(); dit.ok(); ++dit)
    {
        const FArrayBox& coarfab   = a_phi[dit];
        const FluxBox&   coarBCoef = (*m_bCoef)[dit];
        const Box&       gridBox   = a_phi.getBoxes()[dit];

        if (m_levfluxreg.hasCF(dit()))
        {
            for (int idir = 0; idir < SpaceDim; idir++)
            {
                FArrayBox coarflux;
                Box faceBox = surroundingNodes(gridBox, idir);
                getFlux(coarflux, coarfab, coarBCoef, faceBox, idir);

                Real scale = 1.0;
                m_levfluxreg.incrementCoarse(coarflux, scale, dit(),
                                             interv, interv, idir);
            }
        }
    }

    CH_STOP(t2);

    // const cast:  OK because we're changing ghost cells only
    LevelData<FArrayBox>& phiFineRef = (LevelData<FArrayBox>&)a_phiFine;

    VCAMRPoissonOp2* finerAMRPOp = (VCAMRPoissonOp2*)a_finerOp;
    QuadCFInterp& quadCFI = finerAMRPOp->m_interpWithCoarser;

    quadCFI.coarseFineInterp(phiFineRef, a_phi);
    // I'm pretty sure this is not necessary. bvs -- flux calculations use
    // outer ghost cells, but not inner ones
    // phiFineRef.exchange(a_phiFine.interval());
    IntVect phiGhost = phiFineRef.ghostVect();
    int ncomps = a_phiFine.nComp();

    CH_TIMER("VCAMRPoissonOp2::reflux::incrementFine", t3);
    CH_START(t3);

    DataIterator ditf = a_phiFine.dataIterator();
    const DisjointBoxLayout& dblFine = a_phiFine.disjointBoxLayout();
    for (ditf.reset(); ditf.ok(); ++ditf)
    {
        const FArrayBox& phifFab   = a_phiFine[ditf];
        const FluxBox&   fineBCoef = (*(finerAMRPOp->m_bCoef))[ditf];
        const Box&       gridbox   = dblFine.get(ditf());

        for (int idir = 0; idir < SpaceDim; idir++)
        {
            //int normalGhost = phiGhost[idir];
            SideIterator sit;
            for (sit.begin(); sit.ok(); sit.next())
            {
                if (m_levfluxreg.hasCF(ditf(), sit()))
                {
                    Side::LoHiSide hiorlo = sit();
                    Box fluxBox = bdryBox(gridbox, idir, hiorlo, 1);

                    FArrayBox fineflux(fluxBox, ncomps);
                    getFlux(fineflux, phifFab, fineBCoef, fluxBox,
                            idir, m_refToFiner);

                    Real scale = 1.0;
                    m_levfluxreg.incrementFine(fineflux, scale, ditf(),
                                               interv, interv, idir, hiorlo);
                }
            }
        }
    }

    CH_STOP(t3);

    Real scale = 1.0/m_dx;
    m_levfluxreg.reflux(a_residual, scale);
}
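// A condensed sketch of the flux-register pattern both reflux() versions above
// follow (illustrative only; the names refer to the loop variables used in
// those functions, and the real calls appear above):
//
//   levfluxreg.setToZero();
//   // coarse level: accumulate coarse-side fluxes
//   levfluxreg.incrementCoarse(coarflux, scale, dit(), interv, interv, idir);
//   // fine level: accumulate fine-side fluxes on each coarse-fine face
//   levfluxreg.incrementFine(fineflux, scale, ditf(), interv, interv, idir, hiorlo);
//   // finally, add the flux mismatch back into the coarse residual
//   levfluxreg.reflux(a_residual, 1.0/m_dx);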