bool RSIBC::tagCellsInit(FArrayBox& markFAB, const Real& threshold)
{
  // If grid spacing > R0, refine; otherwise only refine the nucleation patch
  if (m_dx > m_R0)
  {
    // pout() << m_dx << " " << m_R0 << endl;
    markFAB.setVal(1, bdryLo(m_domain.domainBox(), 1, 1) & markFAB.box(), 0);
  }
  else
  {
    IntVect nucSm;
    IntVect nucBg;
    if (SpaceDim > 0)
    {
      nucSm.setVal(0, floor(m_x0/m_dx));
      nucBg.setVal(0, ceil(m_x0/m_dx));
    }
    if (SpaceDim > 1)
    {
      nucSm.setVal(1, 0);
      nucBg.setVal(1, 0);
    }
    if (SpaceDim > 2)
    {
      nucSm.setVal(2, floor(m_x0/m_dx));
      nucBg.setVal(2, ceil(m_x0/m_dx));
    }
    markFAB.setVal(1, Box(nucSm, nucBg) & markFAB.box(), 0);
  }
  // pout() << m_domain << endl;
  // FORT_ALLBOUNDREFINE(
  //     CHF_FRA1(markFAB,0),
  //     CHF_CONST_REAL(threshold),
  //     CHF_BOX(markFAB.box()));
  return true;
}
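// Worked example of the physical-to-index conversion above (numbers are
// illustrative, not taken from the source): with a nucleation center
// m_x0 = 1.1 and grid spacing m_dx = 0.25, the tagged x-range runs from cell
// floor(1.1/0.25) = floor(4.4) = 4 to cell ceil(4.4) = 5, so the nucleation
// patch is flagged for refinement even when its center falls between cell
// faces.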
/** Compute dU = dt*dUdt, the change in the conserved variables over the time
    step.  The fluxes returned are suitable for use in refluxing.  This has a
    default implementation but can be redefined as needed.
*/
void LinElastPhysics::computeUpdate(FArrayBox&       a_dU,
                                    FluxBox&         a_F,
                                    const FArrayBox& a_U,
                                    const FluxBox&   a_WHalf,
                                    const bool&      a_useArtificialViscosity,
                                    const Real&      a_artificialViscosity,
                                    const Real&      a_currentTime,
                                    const Real&      a_dx,
                                    const Real&      a_dt,
                                    const Box&       a_box)
{
  CH_assert(isDefined());

  a_dU.setVal(0.0);

  for (int idir = 0; idir < SpaceDim; idir++)
  {
    // Get flux from WHalf
    getFlux(a_F[idir], a_WHalf[idir], idir, a_F[idir].box());

    if (a_useArtificialViscosity)
    {
      artVisc(a_F[idir], a_U,
              a_artificialViscosity, a_currentTime,
              idir, a_box);
    }

    // Compute flux difference fHi-fLo
    FArrayBox diff(a_dU.box(), a_dU.nComp());
    diff.setVal(0.0);

    FORT_FLUXDIFFF(CHF_FRA(diff),
                   CHF_CONST_FRA(a_F[idir]),
                   CHF_CONST_INT(idir),
                   CHF_BOX(a_box));

    // Add flux difference to dU
    a_dU += diff;

    ((LEPhysIBC*)m_bc)->updateBoundary(a_WHalf[idir], idir, a_dt, a_dx, a_currentTime+a_dt, true);
  }

  // Multiply dU by -dt/dx because that is what the caller expects
  a_dU *= -a_dt / a_dx;
}
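// Illustration of the per-direction flux difference that FORT_FLUXDIFFF is
// responsible for in computeUpdate() above.  This is a hedged plain-C++
// sketch of that stencil (hypothetical helper, not the actual ChomboFortran
// source): for a flux FArrayBox that is face-centered in direction a_dir, the
// faces bounding cell iv are indexed at iv (low side) and iv + e_dir (high
// side).
static void fluxDifferenceSketch(FArrayBox&       a_diff,  // cell-centered result
                                 const FArrayBox& a_flux,  // face-centered in a_dir
                                 const int        a_dir,
                                 const Box&       a_box)   // cell-centered box
{
  for (int comp = 0; comp < a_diff.nComp(); ++comp)
  {
    for (BoxIterator bit(a_box); bit.ok(); ++bit)
    {
      const IntVect ivLo = bit();           // low face of this cell
      IntVect ivHi = ivLo;
      ivHi.setVal(a_dir, ivLo[a_dir] + 1);  // high face of this cell
      a_diff(ivLo, comp) = a_flux(ivHi, comp) - a_flux(ivLo, comp);
    }
  }
}
// computeUpdate() accumulates these differences over all directions and scales
// by -dt/dx, i.e. the conservative update dU = -(dt/dx) * sum_d (fHi_d - fLo_d).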
static void setVal3(const Box& box, int comps, FArrayBox& fab)
{
  if (SpaceDim < 3)
  {
    fab.setVal(311);
  }
  else
  {
    int center = (box.smallEnd()[2] + box.bigEnd()[2]) / 2;
    for (int c = 0; c < comps; ++c)
    {
      for (BoxIterator bit(box); bit.ok(); ++bit)
      {
        fab(bit(), c) = c + center + bit()[0];
      }
    }
  }
}
void simpleDivergenceMAC(FArrayBox& a_div, const FluxBox& a_uEdge, const Real a_dx)
{
  a_div.setVal(0.0);

  const Box& cellBox = a_uEdge.box();

  // now loop over coordinate directions and increment divergence
  for (int dir = 0; dir < SpaceDim; dir++)
  {
    const FArrayBox& uEdgeDir = a_uEdge[dir];

    FORT_DIVERGENCE(CHF_CONST_FRA(uEdgeDir),
                    CHF_FRA(a_div),
                    CHF_BOX(cellBox),
                    CHF_CONST_REAL(a_dx),
                    CHF_INT(dir));
  }
}
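// Hypothetical usage sketch for simpleDivergenceMAC() (the box size, spacing,
// and values below are illustrative, not from the source).  A FluxBox built on
// a cell-centered box holds one face-centered FArrayBox per direction, which
// is exactly what the MAC divergence above expects; a uniform velocity field
// should produce a divergence of zero up to roundoff.
void divergenceUsageSketch()
{
  Box cellBox(IntVect::Zero, 15 * IntVect::Unit); // 16^SpaceDim cells
  FluxBox uEdge(cellBox, 1);                      // face-centered velocity, 1 component
  uEdge.setVal(1.0);                              // uniform field => divergence-free
  FArrayBox divU(cellBox, 1);
  simpleDivergenceMAC(divU, uEdge, 0.1);          // divU should be ~0 everywhere
}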
virtual void operator()(const Box& box, int comp, FArrayBox& fab) const
{
  fab.setVal(m_x, box, comp);
}
//
// For testing LevelData::apply()...a function, and a functor:
//
void leveldataApplyFunc(const Box& box, int comp, FArrayBox& fab)
{
  fab.setVal(12.34, box, 0);
}
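// Hypothetical usage sketch (assumes a DisjointBoxLayout named 'grids' is
// already defined; this is not from the source).  LevelData<FArrayBox>::apply()
// visits every patch in the layout and hands its box, the component count, and
// the local FArrayBox to the supplied callable, so the free function above sets
// component 0 of every patch to 12.34; the functor defined earlier is exercised
// analogously.
void applyUsageSketch(const DisjointBoxLayout& grids)
{
  LevelData<FArrayBox> data(grids, 1, IntVect::Zero);
  data.apply(leveldataApplyFunc);
}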
bool SWIBC::tagCellsInit(FArrayBox& markFAB, const Real& threshold)
{
  // We do this here because we need m_dx to be set, and it isn't set when
  // the object is defined
  if (!m_isPatchBoxSet)
  {
    m_patchBoxes.resize(m_numPatches);
    // Loop over all the patches and figure out the boxes
    for (int itor = 0; itor < m_numPatches; itor++)
    {
      IntVect nucSm;
      IntVect nucBg;
      int offSet = 0;
      if (SpaceDim > 0)
      {
        nucSm.setVal(0, floor((m_fricBoxCenter[0]+m_xcPatches[itor]-m_xwPatches[itor])/m_dx));
        nucBg.setVal(0, ceil((m_fricBoxCenter[0]+m_xcPatches[itor]+m_xwPatches[itor])/m_dx));
      }
      if (SpaceDim > 1)
      {
        nucSm.setVal(1, 0);
        nucBg.setVal(1, 0);
      }
      if (SpaceDim > 2)
      {
        nucSm.setVal(2, floor((m_fricBoxCenter[1]+m_zcPatches[itor]-m_zwPatches[itor])/m_dx));
        nucBg.setVal(2, ceil((m_fricBoxCenter[1]+m_zcPatches[itor]+m_zwPatches[itor])/m_dx));
      }
      m_patchBoxes[itor] = Box(nucSm, nucBg);
      m_smoothWidthNumCells = ceil(m_smoothValue / m_dx / 2);
      m_isPatchBoxSet = true;
    }
  }

  for (int itor = 0; itor < m_numPatches; itor++)
  {
    markFAB.setVal(1, m_patchBoxes[itor] & markFAB.box(), 0);
  }

  // markFAB.setVal(1,markFAB.box(),0);
  // for(int itor = 0; itor < m_patchBoxes.capacity(); itor++)
  // {
  //     markFAB.setVal(1,adjCellLo(m_patchBoxes[itor],0, m_smoothWidthNumCells) & markFAB.box(),0);
  //     markFAB.setVal(1,adjCellHi(m_patchBoxes[itor],0, m_smoothWidthNumCells) & markFAB.box(),0);
  //     markFAB.setVal(1,adjCellLo(m_patchBoxes[itor],0,-m_smoothWidthNumCells) & markFAB.box(),0);
  //     markFAB.setVal(1,adjCellHi(m_patchBoxes[itor],0,-m_smoothWidthNumCells) & markFAB.box(),0);
  //     if(SpaceDim > 2)
  //     {
  //         markFAB.setVal(1,adjCellLo(m_patchBoxes[itor],2, m_smoothWidthNumCells) & markFAB.box(),0);
  //         markFAB.setVal(1,adjCellHi(m_patchBoxes[itor],2, m_smoothWidthNumCells) & markFAB.box(),0);
  //         markFAB.setVal(1,adjCellLo(m_patchBoxes[itor],2,-m_smoothWidthNumCells) & markFAB.box(),0);
  //         markFAB.setVal(1,adjCellHi(m_patchBoxes[itor],2,-m_smoothWidthNumCells) & markFAB.box(),0);
  //     }
  // }

  // FORT_BOUNDREFINE(
  //     CHF_FRA1(markFAB,0),
  //     CHF_CONST_REAL(refLocation),
  //     CHF_CONST_REAL(m_dx),
  //     CHF_BOX(b));

  return true;
}
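// Worked example of the patch-box arithmetic above (illustrative numbers, not
// from the source): with m_fricBoxCenter[0] = 10, a patch center
// m_xcPatches[itor] = 2, half-width m_xwPatches[itor] = 0.5, and m_dx = 0.25,
// the tagged box spans cells floor((10+2-0.5)/0.25) = 46 through
// ceil((10+2+0.5)/0.25) = 50 in x, so the refined region always covers the
// full physical extent of the patch.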
void NewPoissonOp::setToZero(FArrayBox& a_lhs)
{
  a_lhs.setVal(0.0);
}
void setFunc(const Box&, int m, FArrayBox& F)
{
  F.setVal(procID());
}
void PatchGodunov::PPMNormalPred(FArrayBox&       a_WMinus,
                                 FArrayBox&       a_WPlus,
                                 const Real&      a_dt,
                                 const Real&      a_dx,
                                 const FArrayBox& a_W,
                                 const FArrayBox& a_flat,
                                 const int&       a_dir,
                                 const Box&       a_box)
{
  int numprim = m_gdnvPhysics->numPrimitives();

  Box faceBox = a_box;
  // added by petermc, 22 Sep 2008:
  // for 4th order, need extra faces in all the directions
  if (m_highOrderLimiter)
  {
    faceBox.grow(1);
  }
  faceBox.surroundingNodes(a_dir);
  FArrayBox WFace(faceBox, numprim);

  // Return WFace on face-centered faceBox.
  m_util.PPMFaceValues(WFace, a_W, numprim,
                       m_useCharLimiting || m_usePrimLimiting,
                       a_dir, faceBox, m_currentTime, m_gdnvPhysics);

  // To save on storage, we use the input values as temporaries for the deltas
  a_WMinus.setVal(0.0);
  a_WPlus .setVal(0.0);

  a_WMinus -= a_W;
  a_WPlus  -= a_W;

  WFace.shiftHalf(a_dir, 1);
  a_WMinus += WFace;

  WFace.shift(a_dir, -1);
  a_WPlus += WFace;

  FArrayBox lambda(a_box, numprim);
  m_gdnvPhysics->charValues(lambda, a_W, a_dir, a_box);

  if (m_useCharLimiting && m_usePrimLimiting)
  {
    MayDay::Error("PatchGodunov::PPMNormalPred: Attempt to limit slopes in primitive AND characteristic coordinates - not implemented");
  }

  // Apply limiter on characteristic or primitive variables. Either
  // way, must end up with characteristic variables to pass to the
  // normal predictor utility. Currently, cannot do both.

  // If doing characteristic limiting then transform before limiting
  if (m_useCharLimiting)
  {
    // Transform from primitive to characteristic variables
    m_gdnvPhysics->charAnalysis(a_WMinus, a_W, a_dir, a_box);
    m_gdnvPhysics->charAnalysis(a_WPlus , a_W, a_dir, a_box);
  }

  if (m_useCharLimiting || m_usePrimLimiting)
  {
    // Do slope limiting
    // m_util.PPMLimiter(a_WMinus,a_WPlus,numprim,a_box);
    // petermc, 4 Sep 2008: included a_W and a_dir in argument list
    m_util.PPMLimiter(a_WMinus, a_WPlus, a_W, numprim, a_dir, a_box);

    // Do slope flattening
    if (m_useFlattening)
    {
      m_util.applyFlattening(a_WMinus, a_flat, a_box);
      m_util.applyFlattening(a_WPlus , a_flat, a_box);
    }
  }

  // If not doing characteristic limiting then transform after any limiting
  if (!m_useCharLimiting)
  {
    // Transform from primitive to characteristic variables
    m_gdnvPhysics->charAnalysis(a_WMinus, a_W, a_dir, a_box);
    m_gdnvPhysics->charAnalysis(a_WPlus , a_W, a_dir, a_box);
  }

  // Do the normal prediction in characteristic variables
  m_util.PPMNormalPred(a_WMinus, a_WPlus, lambda, a_dt / a_dx, numprim, a_box);

  // Construct the increments to the primitive variables
  m_gdnvPhysics->charSynthesis(a_WMinus, a_W, a_dir, a_box);
  m_gdnvPhysics->charSynthesis(a_WPlus , a_W, a_dir, a_box);

  // Apply a physics-dependent post-normal predictor step:
  // For example:
  //   - adjust/bound deltas so constraints on extrapolated primitive
  //     quantities are enforced (density and pressure > 0).
  //   - compute source terms that depend on the spatially varying
  //     coefficients.
  m_gdnvPhysics->postNormalPred(a_WMinus, a_WPlus, a_W, a_dt, a_dx, a_dir, a_box);

  // Compute the state from the increments
  a_WMinus += a_W;
  a_WPlus  += a_W;
}
void PatchGodunov::PLMNormalPred(FArrayBox&       a_WMinus,
                                 FArrayBox&       a_WPlus,
                                 const Real&      a_dt,
                                 const Real&      a_dx,
                                 const FArrayBox& a_W,
                                 const FArrayBox& a_flat,
                                 const int&       a_dir,
                                 const Box&       a_box)
{
  int numprim = m_gdnvPhysics->numPrimitives();

  // This will hold 2nd or 4th order slopes
  FArrayBox dW(a_box, numprim);

  if (m_useFourthOrderSlopes)
  {
    // 2nd order slopes need to be computed over a larger box to accommodate
    // the 4th order slope computation
    Box boxVL = a_box;
    boxVL.grow(a_dir, 1);
    boxVL &= m_domain;

    // Compute 2nd order (van Leer) slopes
    FArrayBox dWvL(boxVL, numprim);
    m_util.vanLeerSlopes(dWvL, a_W, numprim,
                         m_useCharLimiting || m_usePrimLimiting,
                         a_dir, boxVL);
    m_gdnvPhysics->getPhysIBC()->setBdrySlopes(dWvL, a_W, a_dir, m_currentTime);

    // Compute 4th order slopes, without limiting.
    m_util.fourthOrderSlopes(dW, a_W, dWvL, numprim, a_dir, a_box);
  }
  else
  {
    // Compute 2nd order (van Leer) slopes
    m_util.vanLeerSlopes(dW, a_W, numprim,
                         m_useCharLimiting || m_usePrimLimiting,
                         a_dir, a_box);
    m_gdnvPhysics->getPhysIBC()->setBdrySlopes(dW, a_W, a_dir, m_currentTime);
  }

  // To save on storage, we use the input values as temporaries for the deltas
  a_WMinus.setVal(0.0);
  a_WPlus .setVal(0.0);

  if (m_useCharLimiting || m_useFourthOrderSlopes)
  {
    // Compute one-sided differences as inputs for limiting.
    m_util.oneSidedDifferences(a_WMinus, a_WPlus, a_W, a_dir, a_box);
  }

  FArrayBox lambda(a_box, numprim);
  m_gdnvPhysics->charValues(lambda, a_W, a_dir, a_box);

  if (m_useCharLimiting && m_usePrimLimiting)
  {
    MayDay::Error("PatchGodunov::PLMNormalPred: Attempt to limit slopes in primitive AND characteristic coordinates - not implemented");
  }

  // Apply limiter on characteristic or primitive variables. Either
  // way, must end up with characteristic variables to pass to the
  // normal predictor utility. Currently, cannot do both.

  // If doing characteristic limiting then transform before limiting
  if (m_useCharLimiting)
  {
    // Transform from primitive to characteristic variables
    m_gdnvPhysics->charAnalysis(a_WMinus, a_W, a_dir, a_box);
    m_gdnvPhysics->charAnalysis(a_WPlus , a_W, a_dir, a_box);
    m_gdnvPhysics->charAnalysis(dW      , a_W, a_dir, a_box);
  }

  if (m_useCharLimiting || m_usePrimLimiting)
  {
    // Limiting is already done for 2nd order slopes in primitive variables
    // so don't do it again
    if (m_useCharLimiting || m_useFourthOrderSlopes)
    {
      // Do slope limiting
      m_util.slopeLimiter(dW, a_WMinus, a_WPlus, numprim, a_box);
    }

    // Do slope flattening
    if (m_useFlattening)
    {
      m_util.applyFlattening(dW, a_flat, a_box);
    }
  }

  // If not doing characteristic limiting then transform after any limiting
  if (!m_useCharLimiting)
  {
    // Transform from primitive to characteristic variables
    m_gdnvPhysics->charAnalysis(dW, a_W, a_dir, a_box);
  }

  // Do the normal prediction in characteristic variables
  m_util.PLMNormalPred(a_WMinus, a_WPlus, dW, lambda, a_dt / a_dx, a_box);

  // Construct the increments to the primitive variables
  m_gdnvPhysics->charSynthesis(a_WMinus, a_W, a_dir, a_box);
  m_gdnvPhysics->charSynthesis(a_WPlus , a_W, a_dir, a_box);

  // Apply a physics-dependent post-normal predictor step:
  // For example:
  //   - adjust/bound deltas so constraints on extrapolated primitive
  //     quantities are enforced (density and pressure > 0).
  //   - compute source terms that depend on the spatially varying
  //     coefficients.
  m_gdnvPhysics->postNormalPred(a_WMinus, a_WPlus, a_W, a_dt, a_dx, a_dir, a_box);

  // Compute the state from the increments
  a_WMinus += a_W;
  a_WPlus  += a_W;
}
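// Note on the predictor step used by both PLMNormalPred() and PPMNormalPred()
// above: after charAnalysis() the increments live in characteristic variables,
// and second-order characteristic tracing (in the spirit of Colella's PLM/PPM
// schemes) extrapolates each characteristic field k with speed lambda_k over a
// half time step, roughly
//
//   WMinus_k ~ -1/2 * (1 + lambda_k * dt/dx) * dW_k
//   WPlus_k  ~ +1/2 * (1 - lambda_k * dt/dx) * dW_k
//
// (the actual GodunovUtilities Fortran kernels may additionally trace only
// the characteristics that propagate toward the corresponding face).
// charSynthesis() then maps the increments back to primitive variables before
// a_W is added in at the end.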