// ---------------------------------------------------------
// 27 March 2003:
// This is called by other functions, and should not be called directly.
Real maxnorm(const BoxLayoutData<NodeFArrayBox>& a_layout,
             const Interval& a_interval,
             bool a_verbose)
{
  Real normTotal = 0.; // a_p == 0: max norm
  for (DataIterator it = a_layout.dataIterator(); it.ok(); ++it)
  {
    const Box& thisBox(a_layout.box(it())); // CELL-centered
    const NodeFArrayBox& thisNfab = a_layout[it()];
    Real thisNfabNorm = maxnorm(thisNfab, thisBox,
                                a_interval.begin(), a_interval.size());
    if (a_verbose)
      cout << "maxnorm(" << thisBox << ") = " << thisNfabNorm << endl;

    normTotal = Max(normTotal, thisNfabNorm);
  }
#ifdef CH_MPI
  Real recv;
  int result = MPI_Allreduce(&normTotal, &recv, 1, MPI_CH_REAL,
                             MPI_MAX, Chombo_MPI::comm);
  if (result != MPI_SUCCESS)
  {
    // bark!!!
    MayDay::Error("sorry, but I had a communication error on maxnorm");
  }
  normTotal = recv;
#endif
  return normTotal;
}
// ---------------------------------------------------------
// 28 March 2003:
// This is called by other functions, and should not be called directly.
Real integral(const BoxLayoutData<NodeFArrayBox>& a_layout,
              const Real a_dx,
              const Interval& a_interval,
              bool a_verbose)
{
  Real integralTotal = 0.;
  for (DataIterator it = a_layout.dataIterator(); it.ok(); ++it)
  {
    const NodeFArrayBox& thisNfab = a_layout[it()];
    const Box& thisBox(a_layout.box(it())); // CELL-centered
    Real thisNfabIntegral = integral(thisNfab, a_dx, thisBox,
                                     a_interval.begin(), a_interval.size());
    integralTotal += thisNfabIntegral;
  }
#ifdef CH_MPI
  Real recv;
  // add up
  int result = MPI_Allreduce(&integralTotal, &recv, 1, MPI_CH_REAL,
                             MPI_SUM, Chombo_MPI::comm);
  if (result != MPI_SUCCESS)
  {
    // bark!!!
    MayDay::Error("sorry, but I had a communication error on integral");
  }
  integralTotal = recv;
#endif
  return integralTotal;
}
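// ---------------------------------------------------------
// Illustrative sketch (not from the original sources): the all-reduce
// pattern used by maxnorm() and integral() above, restated with plain
// MPI and double instead of Chombo's Real / MPI_CH_REAL.  The function
// name sumOverRanks is hypothetical.
#include <mpi.h>

double sumOverRanks(double localSum)
{
  double globalSum = localSum;
  // Every rank contributes its local sum; every rank receives the total.
  int result = MPI_Allreduce(&localSum, &globalSum, 1, MPI_DOUBLE,
                             MPI_SUM, MPI_COMM_WORLD);
  if (result != MPI_SUCCESS)
  {
    // The Chombo code above aborts via MayDay::Error in this case.
  }
  return globalSum;
}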
void IntensityDistributionHistogram::construct( const Billon &billon, const Interval<uint> &sliceInterval,
                                                const Interval<int> &intensityInterval, const uint &smoothingRadius )
{
  const uint &width = billon.n_cols;
  const uint &height = billon.n_rows;
  const int &minVal = intensityInterval.min();

  uint i, j, k;

  clear();
  resize(intensityInterval.size()+1);

  for ( k=sliceInterval.min() ; k<=sliceInterval.max() ; ++k )
  {
    const Slice &slice = billon.slice(k);
    for ( j=0 ; j<height ; ++j )
    {
      for ( i=0 ; i<width ; ++i )
      {
        if ( intensityInterval.containsClosed(slice.at(j,i)) )
          ++((*this)[slice.at(j,i)-minVal]);
      }
    }
  }

  meansSmoothing(smoothingRadius,false);
}
void IntensityDistributionHistogram::construct( const Billon &billon, const Interval<uint> &sliceInterval,
                                                const Interval<uint> &sectorInterval, const iCoord2D &pithCoord,
                                                const uint &maxDistance, const Interval<int> &intensityInterval,
                                                const uint &smoothingRadius )
{
  const uint &width = billon.n_cols;
  const uint &height = billon.n_rows;
  const int &minVal = intensityInterval.min();

  uint i, j, k;

  clear();
  resize(intensityInterval.size()+1);

  for ( j=0 ; j<height ; ++j )
  {
    for ( i=0 ; i<width ; ++i )
    {
      if ( sectorInterval.containsClosed(PieChartSingleton::getInstance()->sectorIndexOfAngle(pithCoord.angle(iCoord2D(i,j))))
           && pithCoord.euclideanDistance(iCoord2D(i,j)) < maxDistance )
      {
        for ( k=sliceInterval.min() ; k<=sliceInterval.max() ; ++k )
        {
          if ( intensityInterval.containsClosed(billon.slice(k).at(j,i)) )
            ++((*this)[billon.slice(k).at(j,i)-minVal]);
        }
      }
    }
  }

  meansSmoothing(smoothingRadius,false);
}
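// ---------------------------------------------------------
// Illustrative sketch (not from the original sources): the binning rule
// shared by both construct() overloads above, restated with the standard
// library only.  An intensity v inside [minVal, maxVal] increments bin
// v - minVal; values outside the interval are ignored.  All names here
// are hypothetical.
#include <vector>

std::vector<unsigned int> buildHistogram(const std::vector<int> &values,
                                         int minVal, int maxVal)
{
  // One bin per intensity value in the closed interval, as in
  // resize(intensityInterval.size()+1) above.
  std::vector<unsigned int> bins(maxVal - minVal + 1, 0u);
  for (int v : values)
  {
    if (v >= minVal && v <= maxVal)
      ++bins[v - minVal];
  }
  return bins;
}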
// ------------------------------------------------------------
// version of 27 March 2003:  no finer level
Real integral(const LevelData<NodeFArrayBox>& a_phi,
              const ProblemDomain& a_domain,
              const LayoutData< Vector<IntVectSet> >& a_IVSVext,
              const Real a_dx,
              const Interval a_comps,
              bool a_verbose)
{
  // Idea:  copy a_phi to temp, then zero out temp on
  // exterior nodes of grids at this level.
  int ncomps = a_comps.size();
  const DisjointBoxLayout& grids = a_phi.getBoxes();
  LevelData<NodeFArrayBox> temp(grids, ncomps);

  // Copy a_phi to temp.
  Interval newcomps(0, ncomps-1);
  for (DataIterator dit(grids.dataIterator()); dit.ok(); ++dit)
  {
    const NodeFArrayBox& nfab = a_phi[dit()];
    const Box& bx = grids.get(dit());
    temp[dit()].copy(bx, newcomps, bx, nfab, a_comps);
  }

  // Zero out temp on exterior nodes.
  zeroBoundaryNodes(temp, a_IVSVext);

  Real integralLevel = integral(temp, a_dx, newcomps, a_verbose);

  return integralLevel;
}
void LevelFluxRegisterEdge::incrementFine(FArrayBox& a_fineFlux,
                                          Real a_scale,
                                          const DataIndex& a_fineDataIndex,
                                          const Interval& a_srcInterval,
                                          const Interval& a_dstInterval)
{
  CH_assert(isDefined());
  CH_assert(!a_fineFlux.box().isEmpty());
  CH_assert(a_srcInterval.size() == a_dstInterval.size());
  CH_assert(a_srcInterval.begin() >= 0);
  CH_assert(a_srcInterval.end() < a_fineFlux.nComp());
  CH_assert(a_dstInterval.begin() >= 0);
  CH_assert(a_dstInterval.end() < m_nComp);

  int edgeDir = -1;
  for (int sideDir = 0; sideDir < SpaceDim; sideDir++)
  {
    if (a_fineFlux.box().type(sideDir) == IndexType::CELL)
    {
      edgeDir = sideDir;
    }
  }
  CH_assert(edgeDir >= 0);
  CH_assert(edgeDir < SpaceDim);

  for (int faceDir = 0; faceDir < SpaceDim; faceDir++)
  {
    if (faceDir != edgeDir)
    {
      SideIterator sit;
      for (sit.begin(); sit.ok(); ++sit)
      {
        incrementFine(a_fineFlux, a_scale, a_fineDataIndex,
                      a_srcInterval, a_dstInterval,
                      faceDir, sit());
      }
    }
  }
}
virtual void linearIn(EBCellFAB& arg, void* buf, const Box& R,
                      const Interval& comps) const
{
  EBCellFAB tmp;
  tmp.clone(arg);
  tmp.linearIn(buf, R, comps);
  arg.plus(tmp, R, comps.begin(), comps.begin(), comps.size());
}
void op(EBCellFAB& dest,
        const Box& RegionFrom,
        const Interval& Cdest,
        const Box& RegionTo,
        const EBCellFAB& src,
        const Interval& Csrc) const
{
  dest.plus(src, RegionFrom, Csrc.begin(), Cdest.begin(), Cdest.size());
}
void MappedLevelFluxRegister::incrementCoarse(const FArrayBox& a_coarseFlux,
                                              Real a_scale,
                                              const DataIndex& a_coarseDataIndex,
                                              const Interval& a_srcInterval,
                                              const Interval& a_dstInterval,
                                              int a_dir,
                                              Side::LoHiSide a_sd)
{
  CH_assert(isDefined());
  if (!(m_isDefined & FluxRegCoarseDefined)) return;
  CH_TIME("MappedLevelFluxRegister::incrementCoarse");

  const Vector<Box>& intersect =
    m_coarseLocations[a_dir + a_sd * CH_SPACEDIM][a_coarseDataIndex];

  FArrayBox& coarse = m_coarFlux[a_coarseDataIndex];

  // We cast away the constness in a_coarseFlux for the scope of this function. This
  // should be acceptable, since at the end of the day there is no change to it. -JNJ
  FArrayBox& coarseFlux = const_cast<FArrayBox&>(a_coarseFlux); // Muhahaha.
  coarseFlux.shiftHalf(a_dir, sign(a_sd));
  Real scale = -sign(a_sd) * a_scale;
  int s = a_srcInterval.begin();
  int d = a_dstInterval.begin();
  int size = a_srcInterval.size();

  for (int b = 0; b < intersect.size(); ++b)
  {
    const Box& box = intersect[b];
    Vector<Real> regbefore(coarse.nComp());
    Vector<Real> regafter(coarse.nComp());
    if (s_verbose && (a_dir == debugdir) && box.contains(ivdebnoeb))
    {
      for (int ivar = 0; ivar < coarse.nComp(); ivar++)
      {
        regbefore[ivar] = coarse(ivdebnoeb, ivar);
      }
    }

    coarse.plus(coarseFlux, box, box, scale, s, d, size);

    if (s_verbose && (a_dir == debugdir) && box.contains(ivdebnoeb))
    {
      for (int ivar = 0; ivar < coarse.nComp(); ivar++)
      {
        regafter[ivar] = coarse(ivdebnoeb, ivar);
      }
      pout() << "levelfluxreg::incrementCoar: scale = " << scale << ", ";
      for (int ivar = 0; ivar < coarse.nComp(); ivar++)
      {
        pout() << " input flux = " << coarseFlux(ivdebnoeb, ivar) << ", ";
        pout() << " reg before = " << regbefore[ivar] << ", ";
        pout() << " reg after  = " << regafter[ivar] << ", ";
      }
      pout() << endl;
    }
  }

  coarseFlux.shiftHalf(a_dir, -sign(a_sd));
}
// ---------------------------------------------------------
// 7 Dec 2005
Real maxnorm(const BoxLayoutData<NodeFArrayBox>& a_layout,
             const LevelData<NodeFArrayBox>& a_mask,
             const ProblemDomain& a_domain,
             const Interval& a_interval,
             bool a_verbose)
{
  Real normTotal = 0.; // a_p == 0: max norm
  int ncomp = a_interval.size();
  for (DataIterator it = a_layout.dataIterator(); it.ok(); ++it)
  {
    const NodeFArrayBox& thisNfab = a_layout[it()];
    const FArrayBox& dataFab = thisNfab.getFab();
    const FArrayBox& maskFab = a_mask[it()].getFab();
    const Box& thisBox(a_layout.box(it())); // CELL-centered
    NodeFArrayBox dataMasked(thisBox, ncomp);
    FArrayBox& dataMaskedFab = dataMasked.getFab();
    dataMaskedFab.copy(dataFab);
    // dataMaskedFab *= maskFab;
    for (int comp = a_interval.begin(); comp <= a_interval.end(); comp++)
    {
      // Set dataMaskedFab[comp] *= maskFab[0].
      dataMaskedFab.mult(maskFab, 0, comp);
    }
    Real thisNfabNorm = maxnorm(dataMasked, thisBox,
                                a_interval.begin(), a_interval.size());
    if (a_verbose)
      cout << "maxnorm(" << thisBox << ") = " << thisNfabNorm << endl;

    normTotal = Max(normTotal, thisNfabNorm);
  }
#ifdef CH_MPI
  Real recv;
  // take the max over ranks (a_p == 0)
  int result = MPI_Allreduce(&normTotal, &recv, 1, MPI_CH_REAL,
                             MPI_MAX, Chombo_MPI::comm);
  if (result != MPI_SUCCESS)
  {
    // bark!!!
    MayDay::Error("sorry, but I had a communication error on norm");
  }
  normTotal = recv;
#endif
  return normTotal;
}
void LevelFluxRegisterEdge::incrementCoarse(FArrayBox& a_coarseFlux,
                                            Real a_scale,
                                            const DataIndex& a_coarseDataIndex,
                                            const Interval& a_srcInterval,
                                            const Interval& a_dstInterval)
{
  CH_assert(isDefined());
  CH_assert(!a_coarseFlux.box().isEmpty());
  CH_assert(a_srcInterval.size() == a_dstInterval.size());
  CH_assert(a_srcInterval.begin() >= 0);
  CH_assert(a_srcInterval.end() < a_coarseFlux.nComp());
  CH_assert(a_dstInterval.begin() >= 0);
  CH_assert(a_dstInterval.end() < m_nComp);

  // get edge-centering of coarseFlux
  const Box& edgeBox = a_coarseFlux.box();
  int edgeDir = -1;
  for (int dir = 0; dir < SpaceDim; dir++)
  {
    if (edgeBox.type(dir) == IndexType::CELL)
    {
      if (edgeDir == -1)
      {
        edgeDir = dir;
      }
      else
      {
        // already found a cell-centered direction (should only be
        // one for edge-centering)
        MayDay::Error("LevelFluxRegisterEdge::incrementCoarse -- e-field not edge-centered");
      }
    }
  } // end loop over directions
  CH_assert(edgeDir != -1);

  FArrayBox& thisCrseReg = m_regCoarse[a_coarseDataIndex][edgeDir];

  thisCrseReg.plus(a_coarseFlux, -a_scale, a_srcInterval.begin(),
                   a_dstInterval.begin(), a_srcInterval.size());
}
// ---------------------------------------------------------
// 27 March 2003:
// This is called by other functions, and should not be called directly.
Real norm(const BoxLayoutData<NodeFArrayBox>& a_layout,
          const Real a_dx,
          const int a_p,
          const Interval& a_interval,
          bool a_verbose)
{
  if (a_p == 0)
    return maxnorm(a_layout, a_interval, a_verbose);

  Real normTotal = 0.;
  for (DataIterator it = a_layout.dataIterator(); it.ok(); ++it)
  {
    const NodeFArrayBox& thisNfab = a_layout[it()];
    const Box& thisBox(a_layout.box(it())); // CELL-centered
    Real thisNfabNorm = norm(thisNfab, a_dx, thisBox, a_p,
                             a_interval.begin(), a_interval.size());
    if (a_verbose)
      cout << a_p << "norm(" << thisBox << ") = " << thisNfabNorm << endl;

    if (a_p == 1)
    {
      normTotal += thisNfabNorm;
    }
    else if (a_p == 2)
    {
      normTotal += thisNfabNorm * thisNfabNorm;
    }
    else
    {
      normTotal += pow(thisNfabNorm, Real(a_p));
    }
  }
#ifdef CH_MPI
  Real recv;
  // add up (a_p is not 0)
  int result = MPI_Allreduce(&normTotal, &recv, 1, MPI_CH_REAL,
                             MPI_SUM, Chombo_MPI::comm);
  if (result != MPI_SUCCESS)
  {
    // bark!!!
    MayDay::Error("sorry, but I had a communication error on norm");
  }
  normTotal = recv;
#endif
  // now do sqrt, etc
  if (a_p == 2)
    normTotal = sqrt(normTotal);
  else if ((a_p != 0) && (a_p != 1))
    normTotal = pow(normTotal, (Real)1.0/Real(a_p));

  return normTotal;
}
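// ---------------------------------------------------------
// Illustrative sketch (not from the original sources): how the per-box
// norms above are combined into a single norm.  For p == 0 the boxes are
// combined with a max (the maxnorm() path); otherwise p-th powers are
// summed and the p-th root is taken at the end, as norm() does after its
// MPI sum.  All names are hypothetical.
#include <cmath>
#include <vector>
#include <algorithm>

double combineBoxNorms(const std::vector<double> &boxNorms, int p)
{
  double total = 0.;
  for (double bn : boxNorms)
  {
    if (p == 0)
      total = std::max(total, bn);        // max norm
    else if (p == 1)
      total += bn;                        // 1-norm: plain sum
    else
      total += std::pow(bn, double(p));   // p-norm: sum of p-th powers
  }
  if (p == 2)
    total = std::sqrt(total);
  else if (p != 0 && p != 1)
    total = std::pow(total, 1.0 / double(p));
  return total;
}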
// ------------------------------------------------------------
// version of 27 March 2003
Real norm(const LevelData<NodeFArrayBox>& a_phi,
          const ProblemDomain& a_domain,
          const DisjointBoxLayout& a_finerGridsCoarsened,
          const LayoutData< Vector<Box> >& a_IVSVext,
          const LayoutData< Vector<Box> >& a_IVSVintFinerCoarsened,
          const int a_nRefFine,
          const Real a_dx,
          const Interval& a_comps,
          const int a_p,
          bool a_verbose)
{
  // Idea:  copy a_phi to temp, then zero out temp on:
  // - exterior nodes of grids at this level;
  // - projections of interior nodes of the finer grids.
  int ncomps = a_comps.size();
  const DisjointBoxLayout& grids = a_phi.getBoxes();
  LevelData<NodeFArrayBox> temp(grids, ncomps);

  // Copy a_phi to temp.
  Interval newcomps(0, ncomps-1);
  for (DataIterator dit(grids.dataIterator()); dit.ok(); ++dit)
  {
    const NodeFArrayBox& nfab = a_phi[dit()];
    const Box& bx = grids.get(dit());
    temp[dit()].copy(bx, newcomps, bx, nfab, a_comps);
  }

  // Zero out temp on exterior nodes.
  zeroBoundaryNodes(temp, a_IVSVext);

  // Define zeroCoarsened to be all zero on the coarsened finer grids.
  LevelData<NodeFArrayBox> zeroCoarsened(a_finerGridsCoarsened, ncomps, IntVect::Zero);
  for (DataIterator dit(a_finerGridsCoarsened.dataIterator()); dit.ok(); ++dit)
    zeroCoarsened[dit()].getFab().setVal(0.);

  // Set temp to zero on interior nodes of coarsened finer grids.
  copyInteriorNodes(temp, zeroCoarsened, a_IVSVintFinerCoarsened);

  Real normLevel = norm(temp, a_dx, a_p, newcomps, a_verbose);

  return normLevel;
}
void TimeInterpolatorRK4::interpolate(/// interpolated solution on this level coarsened
                                      LevelData<FArrayBox>&   a_U,
                                      /// time interpolation coefficient in range [0:1]
                                      const Real&             a_timeInterpCoeff,
                                      /// interval of a_U to fill in
                                      const Interval&         a_intvl)
{
  CH_assert(m_defined);
  CH_assert(m_gotFullTaylorPoly);
  CH_assert(a_U.nComp() == m_numStates);

  LevelData<FArrayBox> UComp;
  aliasLevelData(UComp, &a_U, a_intvl);

  // For i in 0:m_numCoeffs-1,
  // coeffFirst[i] is index of first component of m_taylorCoeffs
  // that corresponds to a coefficient of t^i.
  Vector<int> coeffFirst(m_numCoeffs);
  int intervalLength = a_intvl.size();
  for (int i = 0; i < m_numCoeffs; i++)
  {
    coeffFirst[i] = a_intvl.begin() + i * m_numStates;
  }

  DataIterator dit = UComp.dataIterator();
  for (dit.begin(); dit.ok(); ++dit)
  {
    FArrayBox& UFab = UComp[dit];
    const FArrayBox& taylorFab = m_taylorCoeffs[dit];

    // Evaluate a0 + a1*t + a2*t^2 + a3*t^3
    // as a0 + t * (a1 + t * (a2 + t * a3)).
    UFab.copy(taylorFab, coeffFirst[m_numCoeffs-1], 0, intervalLength);
    for (int ind = m_numCoeffs - 2; ind >= 0; ind--)
    {
      UFab *= a_timeInterpCoeff;
      UFab.plus(taylorFab, coeffFirst[ind], 0, intervalLength);
    }
  }
  // dummy statement in order to get around gdb bug
  int dummy_unused = 0; dummy_unused = 0;
}
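// ---------------------------------------------------------
// Illustrative sketch (not from the original sources): the Horner-style
// evaluation that interpolate() performs FAB-wise, written for a single
// scalar and assuming the four Taylor coefficients a0..a3 described in
// the comments above.  It computes a0 + t*(a1 + t*(a2 + t*a3)), which is
// what the copy / *= / plus sequence does per component.  All names are
// hypothetical.
double evalTaylorHorner(const double a[4], double t)
{
  double u = a[3];               // start from the highest coefficient
  for (int ind = 2; ind >= 0; --ind)
  {
    u *= t;
    u += a[ind];
  }
  return u;
}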
int main(int argc, char *argv[])
{
  Pooma::initialize(argc, argv);
  Pooma::Tester tester(argc, argv);

  // To declare a field, you first need to set up a layout. This requires
  // knowing the physical vertex-domain and the number of external guard
  // cell layers. Vertex domains contain enough points to hold all of the
  // rectilinear centerings that POOMA is likely to support for quite
  // awhile. Also, it means that the same layout can be used for all
  // fields, regardless of centering.

  Interval<2> physicalVertexDomain(14, 14);

  Loc<2> blocks(3, 3);
  GridLayout<2> layout1(physicalVertexDomain, blocks,
                        GuardLayers<2>(1), LayoutTag_t());
  GridLayout<2> layout0(physicalVertexDomain, blocks,
                        GuardLayers<2>(0), LayoutTag_t());

  Centering<2> cell = canonicalCentering<2>(CellType, Continuous, AllDim);
  Centering<2> vert = canonicalCentering<2>(VertexType, Continuous, AllDim);
  Centering<2> yedge = canonicalCentering<2>(EdgeType, Continuous, YDim);

  Vector<2> origin(0.0);
  Vector<2> spacings(1.0, 2.0);

  // First basic test verifies that we're assigning to the correct areas
  // on a brick.

  typedef Field<UniformRectilinearMesh<2>, double,
                MultiPatch<GridTag, BrickTag_t> > Field_t;
  Field_t b0(cell, layout1, origin, spacings);
  Field_t b1(vert, layout1, origin, spacings);
  Field_t b2(yedge, layout1, origin, spacings);
  Field_t b3(yedge, layout1, origin, spacings);
  Field_t bb0(cell, layout0, origin, spacings);
  Field_t bb1(vert, layout0, origin, spacings);
  Field_t bb2(yedge, layout0, origin, spacings);

  b0.all() = 0.0;
  b1.all() = 0.0;
  b2.all() = 0.0;

  b0 = 1.0;
  b1 = 1.0;
  b2 = 1.0;

  bb0.all() = 0.0;
  bb1.all() = 0.0;
  bb2.all() = 0.0;

  bb0 = 1.0;
  bb1 = 1.0;
  bb2 = 1.0;

  // SPMD code follows.
  // Note, SPMD code will work with the evaluator if you are careful
  // to perform assignment on all the relevant contexts. The patchLocal
  // function creates a brick on the local context, so you can just perform
  // the assignment on that context.

  int i;

  for (i = 0; i < b0.numPatchesLocal(); ++i)
  {
    Patch<Field_t>::Type_t patch = b0.patchLocal(i);
    // tester.out() << "context " << Pooma::context() << ": assigning to patch " << i
    //              << " with domain " << patch.domain() << std::endl;
    patch += 1.5;
  }

  // This is safe to do since b1 and b2 are built with the same layout.
  for (i = 0; i < b1.numPatchesLocal(); ++i)
  {
    b1.patchLocal(i) += 1.5;
    b2.patchLocal(i) += 1.5;
  }

  for (i = 0; i < bb0.numPatchesLocal(); ++i)
  {
    Patch<Field_t>::Type_t patch = bb0.patchLocal(i);
    // tester.out() << "context " << Pooma::context() << ": assigning to patch on bb0 " << i
    //              << " with domain " << patch.domain() << std::endl;
    patch += 1.5;
  }

  // This is safe to do since bb1 and bb2 are built with the same layout.
  for (i = 0; i < bb1.numPatchesLocal(); ++i)
  {
    bb1.patchLocal(i) += 1.5;
    bb2.patchLocal(i) += 1.5;
  }

  tester.check("cell centered field is 2.5", all(b0 == 2.5));
  tester.check("vert centered field is 2.5", all(b1 == 2.5));
  tester.check("edge centered field is 2.5", all(b2 == 2.5));

  tester.out() << "b0.all():" << std::endl << b0.all() << std::endl;
  tester.out() << "b1.all():" << std::endl << b1.all() << std::endl;
  tester.out() << "b2.all():" << std::endl << b2.all() << std::endl;

  tester.check("didn't write into b0 boundary",
               sum(b0.all()) == 2.5 * b0.physicalDomain().size());
  tester.check("didn't write into b1 boundary",
               sum(b1.all()) == 2.5 * b1.physicalDomain().size());
  tester.check("didn't write into b2 boundary",
               sum(b2.all()) == 2.5 * b2.physicalDomain().size());

  tester.check("cell centered field is 2.5", all(bb0 == 2.5));
  tester.check("vert centered field is 2.5", all(bb1 == 2.5));
  tester.check("edge centered field is 2.5", all(bb2 == 2.5));

  tester.out() << "bb0:" << std::endl << bb0 << std::endl;
  tester.out() << "bb1:" << std::endl << bb1 << std::endl;
  tester.out() << "bb2:" << std::endl << bb2 << std::endl;

  typedef Field<UniformRectilinearMesh<2>, double,
                MultiPatch<GridTag, CompressibleBrickTag_t> > CField_t;
  CField_t c0(cell, layout1, origin, spacings);
  CField_t c1(vert, layout1, origin, spacings);
  CField_t c2(yedge, layout1, origin, spacings);
  CField_t cb0(cell, layout0, origin, spacings);
  CField_t cb1(vert, layout0, origin, spacings);
  CField_t cb2(yedge, layout0, origin, spacings);

  c0.all() = 0.0;
  c1.all() = 0.0;
  c2.all() = 0.0;

  c0 = 1.0;
  c1 = 1.0;
  c2 = 1.0;

  cb0.all() = 0.0;
  cb1.all() = 0.0;
  cb2.all() = 0.0;

  cb0 = 1.0;
  cb1 = 1.0;
  cb2 = 1.0;

  // SPMD code follows.
  // Note, SPMD code will work with the evaluator if you are careful
  // to perform assignment on all the relevant contexts. The patchLocal
  // function creates a brick on the local context, so you can just perform
  // the assignment on that context.

  for (i = 0; i < c0.numPatchesLocal(); ++i)
  {
    Patch<CField_t>::Type_t patch = c0.patchLocal(i);
    tester.out() << "context " << Pooma::context() << ": assigning to patch " << i
                 << " with domain " << patch.domain() << std::endl;
    patch += 1.5;
  }

  // This is safe to do since c1 and c2 are built with the same layout.
  for (i = 0; i < c1.numPatchesLocal(); ++i)
  {
    c1.patchLocal(i) += 1.5;
    c2.patchLocal(i) += 1.5;
  }

  for (i = 0; i < cb0.numPatchesLocal(); ++i)
  {
    Patch<CField_t>::Type_t patch = cb0.patchLocal(i);
    tester.out() << "context " << Pooma::context() << ": assigning to patch on cb0 " << i
                 << " with domain " << patch.domain() << std::endl;
    patch += 1.5;
  }

  // This is safe to do since cb1 and cb2 are built with the same layout.
  for (i = 0; i < cb1.numPatchesLocal(); ++i)
  {
    cb1.patchLocal(i) += 1.5;
    cb2.patchLocal(i) += 1.5;
  }

  tester.check("cell centered field is 2.5", all(c0 == 2.5));
  tester.check("vert centered field is 2.5", all(c1 == 2.5));
  tester.check("edge centered field is 2.5", all(c2 == 2.5));

  tester.out() << "c0.all():" << std::endl << c0.all() << std::endl;
  tester.out() << "c1.all():" << std::endl << c1.all() << std::endl;
  tester.out() << "c2.all():" << std::endl << c2.all() << std::endl;

  tester.check("didn't write into c0 boundary",
               sum(c0.all()) == 2.5 * c0.physicalDomain().size());
  tester.check("didn't write into c1 boundary",
               sum(c1.all()) == 2.5 * c1.physicalDomain().size());
  tester.check("didn't write into c2 boundary",
               sum(c2.all()) == 2.5 * c2.physicalDomain().size());

  tester.check("cell centered field is 2.5", all(cb0 == 2.5));
  tester.check("vert centered field is 2.5", all(cb1 == 2.5));
  tester.check("edge centered field is 2.5", all(cb2 == 2.5));

  tester.out() << "cb0:" << std::endl << cb0 << std::endl;
  tester.out() << "cb1:" << std::endl << cb1 << std::endl;
  tester.out() << "cb2:" << std::endl << cb2 << std::endl;

  //------------------------------------------------------------------
  // Scalar code example:

  c0 = iota(c0.domain()).comp(0);
  c1 = iota(c1.domain()).comp(1);

  // Make sure all the data-parallel statements are done:
  Pooma::blockAndEvaluate();

  for (i = 0; i < c0.numPatchesLocal(); ++i)
  {
    Patch<CField_t>::Type_t local0 = c0.patchLocal(i);
    Patch<CField_t>::Type_t local1 = c1.patchLocal(i);
    Patch<CField_t>::Type_t local2 = c2.patchLocal(i);

    Interval<2> domain = local2.domain();  // physical domain of local y-edges

    // --------------------------------------------------------------
    // I believe the following is probably the most efficient approach
    // for sparse computations.  For data-parallel computations, the
    // evaluator will uncompress the patches and take brick views, which
    // provide the most efficient access.  If you are only performing
    // the computation on a small portion of cells, then the gains would
    // be outweighed by the act of copying the compressed value to all the
    // cells.
    //
    // The read function is used on the right hand side, because
    // operator() is forced to uncompress the patch just in case you want
    // to write to it.

    for (Interval<2>::iterator pos = domain.begin(); pos != domain.end(); ++pos)
    {
      Loc<2> edge = *pos;
      Loc<2> rightCell = edge;  // cell to right is same cell
      Loc<2> leftCell = edge - Loc<2>(1,0);
      Loc<2> topVert = edge + Loc<2>(0, 1);
      Loc<2> bottomVert = edge;
      local2(edge) =
        local0.read(rightCell) + local0.read(leftCell) +
        local1.read(topVert) + local1.read(bottomVert);
    }

    // This statement is optional, it tries to compress the patch after
    // we're done computing on it.  Since I used .read() for local0 and local1,
    // they remained in their original state.  compress() can be expensive, so
    // it may not be worth trying unless space is really important.

    compress(local2);
  }

  tester.out() << "c0" << std::endl << c0 << std::endl;
  tester.out() << "c1" << std::endl << c1 << std::endl;
  tester.out() << "c2" << std::endl << c2 << std::endl;

  //------------------------------------------------------------------
  // Interfacing with a c-function:
  //
  // This example handles the corner cases, where the patches from a
  // cell centered field with no guard layers actually contain some
  // extra data.
  Pooma::blockAndEvaluate();

  for (i = 0; i < cb0.numPatchesLocal(); ++i)
  {
    Patch<CField_t>::Type_t local0 = cb0.patchLocal(i);
    Interval<2> physicalDomain = local0.physicalDomain();
    double *data;
    int size = physicalDomain.size();

    if (physicalDomain == local0.totalDomain())
    {
      uncompress(local0);
      data = &local0(physicalDomain.firsts());
      nonsense(data, size);
    }
    else
    {
      // In this case, the engine has extra storage even though the
      // field has the right domain.  We copy it to a brick engine,
      // call the function and copy it back.  No uncompress is required,
      // since the assignment will copy the compressed value into the
      // brick.

      // arrayView is a work-around.  Array = Field doesn't work at
      // the moment.

      Array<2, double, Brick> brick(physicalDomain);
      Array<2, double, CompressibleBrick> arrayView(local0.engine());
      brick = arrayView(physicalDomain);
      Pooma::blockAndEvaluate();
      data = &brick(Loc<2>(0));
      nonsense(data, size);
      arrayView(physicalDomain) = brick;

      // Note that we don't need a blockAndEvaluate here, since an iterate has
      // been spawned to perform the copy.
    }

    // If you want to try compress(local0) here, you should do blockAndEvaluate
    // first in case the local0 = brick hasn't been executed yet.
  }

  tester.out() << "cb0.all()" << std::endl << cb0 << std::endl;

  b2 = positions(b2).comp(0);

  RefCountedBlockPtr<double> block = pack(b2);

  // The following functions give you access to the raw data from pack.
  // Note that the lifetime of the data is managed by the RefCountedBlockPtr,
  // so when "block" goes out of scope, the data goes away.  (i.e. Don't write
  // a function where you return block.beginPointer().)

  double *start = block.beginPointer();  // start of the data
  double *end = block.endPointer();      // one past the end
  int size = block.size();               // size of the data

  tester.out() << Pooma::context() << ":" << block.size() << std::endl;

  unpack(b3, block);

  tester.out() << "b2" << std::endl << b2 << std::endl;
  tester.out() << "b3" << std::endl << b3 << std::endl;

  tester.check("pack, unpack", all(b2 == b3));

  int ret = tester.results("LocalPatch");
  Pooma::finalize();
  return ret;
}
// ---------------------------------------------------------
// 7 Dec 2005
Real norm(const BoxLayoutData<NodeFArrayBox>& a_layout,
          const LevelData<NodeFArrayBox>& a_mask,
          const ProblemDomain& a_domain,
          const Real a_dx,
          const int a_p,
          const Interval& a_interval,
          bool a_verbose)
{
  if (a_p == 0)
    return maxnorm(a_layout, a_mask, a_domain, a_interval, a_verbose);

  Real normTotal = 0.;
  int ncomp = a_interval.size();
  Box domBox = a_domain.domainBox();
  for (DataIterator it = a_layout.dataIterator(); it.ok(); ++it)
  {
    const NodeFArrayBox& thisNfab = a_layout[it()];
    const FArrayBox& dataFab = thisNfab.getFab();
    const FArrayBox& maskFab = a_mask[it()].getFab();
    const Box& thisBox(a_layout.box(it())); // CELL-centered
    NodeFArrayBox dataMasked(thisBox, ncomp);
    FArrayBox& dataMaskedFab = dataMasked.getFab();
    dataMaskedFab.copy(dataFab);
    // dataMaskedFab *= maskFab;
    for (int comp = a_interval.begin(); comp <= a_interval.end(); comp++)
    {
      // Set dataMaskedFab[comp] *= maskFab[0].
      dataMaskedFab.mult(maskFab, 0, comp);
    }
    Real thisNfabNorm = 0.;
    if (thisBox.intersects(domBox))
    {
      Box thisBoxInDomain = thisBox & domBox;
      thisNfabNorm = norm(dataMasked, a_dx, thisBoxInDomain, a_p,
                          a_interval.begin(), a_interval.size());
    }
    if (a_verbose)
      cout << a_p << "norm(" << thisBox << ") = " << thisNfabNorm << endl;

    if (a_p == 1)
    {
      normTotal += thisNfabNorm;
    }
    else if (a_p == 2)
    {
      normTotal += thisNfabNorm * thisNfabNorm;
    }
    else
    {
      normTotal += pow(thisNfabNorm, Real(a_p));
    }
  }
#ifdef CH_MPI
  Real recv;
  // add up (a_p is not 0)
  int result = MPI_Allreduce(&normTotal, &recv, 1, MPI_CH_REAL,
                             MPI_SUM, Chombo_MPI::comm);
  if (result != MPI_SUCCESS)
  {
    // bark!!!
    MayDay::Error("sorry, but I had a communication error on norm");
  }
  normTotal = recv;
#endif
  // now do sqrt, etc
  if (a_p == 2)
    normTotal = sqrt(normTotal);
  else if ((a_p != 0) && (a_p != 1))
    normTotal = pow(normTotal, (Real)1.0/Real(a_p));

  return normTotal;
}
void MappedLevelFluxRegister::incrementFine(const FArrayBox& a_fineFlux,
                                            Real a_scale,
                                            const DataIndex& a_fineDataIndex,
                                            const Interval& a_srcInterval,
                                            const Interval& a_dstInterval,
                                            int a_dir,
                                            Side::LoHiSide a_sd)
{
  CH_assert(isDefined());
  if (!(m_isDefined & FluxRegFineDefined)) return;
  CH_assert(a_srcInterval.size() == a_dstInterval.size());

  CH_TIME("MappedLevelFluxRegister::incrementFine");

  // We cast away the constness in a_coarseFlux for the scope of this function. This
  // should be acceptable, since at the end of the day there is no change to it. -JNJ
  FArrayBox& fineFlux = const_cast<FArrayBox&>(a_fineFlux); // Muhahaha.
  fineFlux.shiftHalf(a_dir, sign(a_sd));
  Real denom = 1.0;

  if (m_scaleFineFluxes)
  {
    denom = m_nRefine.product() / m_nRefine[a_dir];
  }

  Real scale = sign(a_sd) * a_scale / denom;

  FArrayBox& cFine = m_fineFlux[a_fineDataIndex];
  // FArrayBox cFineFortran(cFine.box(), cFine.nComp());
  // cFineFortran.copy(cFine);

  Box clipBox = m_fineFlux.box(a_fineDataIndex);
  clipBox.refine(m_nRefine);
  Box fineBox;
  if (a_sd == Side::Lo)
  {
    fineBox = adjCellLo(clipBox, a_dir, 1);
    fineBox &= fineFlux.box();
  }
  else
  {
    fineBox = adjCellHi(clipBox, a_dir, 1);
    fineBox &= fineFlux.box();
  }

#if 0
  for (BoxIterator b(fineBox); b.ok(); ++b)
  {
    int s = a_srcInterval.begin();
    int d = a_dstInterval.begin();
    for (; s <= a_srcInterval.end(); ++s, ++d)
    {
      cFine(coarsen(b(), m_nRefine), d) += scale * fineFlux(b(), s);
    }
  }
#else
  // shifting to ensure fineBox is in the positive quadrant, so IntVect
  // coarsening is just integer division.

  const Box& box = coarsen(fineBox, m_nRefine);
  Vector<Real> regbefore(cFine.nComp());
  Vector<Real> regafter(cFine.nComp());
  if (s_verbose && (a_dir == debugdir) && box.contains(ivdebnoeb))
  {
    for (int ivar = 0; ivar < cFine.nComp(); ivar++)
    {
      regbefore[ivar] = cFine(ivdebnoeb, ivar);
    }
  }

  const IntVect& iv = fineBox.smallEnd();
  IntVect civ = coarsen(iv, m_nRefine);
  int srcComp = a_srcInterval.begin();
  int destComp = a_dstInterval.begin();
  int ncomp = a_srcInterval.size();
  FORT_MAPPEDINCREMENTFINE(CHF_CONST_FRA_SHIFT(fineFlux, iv),
                           CHF_FRA_SHIFT(cFine, civ),
                           CHF_BOX_SHIFT(fineBox, iv),
                           CHF_CONST_INTVECT(m_nRefine),
                           CHF_CONST_REAL(scale),
                           CHF_CONST_INT(srcComp),
                           CHF_CONST_INT(destComp),
                           CHF_CONST_INT(ncomp));

  if (s_verbose && (a_dir == debugdir) && box.contains(ivdebnoeb))
  {
    for (int ivar = 0; ivar < cFine.nComp(); ivar++)
    {
      regafter[ivar] = cFine(ivdebnoeb, ivar);
    }
  }

  if (s_verbose && (a_dir == debugdir) && box.contains(ivdebnoeb))
  {
    pout() << "levelfluxreg::incrementFine: scale = " << scale << endl;
    Box refbox(ivdebnoeb, ivdebnoeb);
    refbox.refine(m_nRefine);
    refbox &= fineBox;
    if (!refbox.isEmpty())
    {
      pout() << "fine fluxes = " << endl;
      for (BoxIterator bit(refbox); bit.ok(); ++bit)
      {
        for (int ivar = 0; ivar < cFine.nComp(); ivar++)
        {
          pout() << "iv = " << bit() << "(";
          for (int ivar = 0; ivar < cFine.nComp(); ivar++)
          {
            pout() << fineFlux(bit(), ivar);
          }
          pout() << ")" << endl;
        }
      }
    }
    for (int ivar = 0; ivar < cFine.nComp(); ivar++)
    {
      pout() << " reg before = " << regbefore[ivar] << ", ";
      pout() << " reg after  = " << regafter[ivar] << ", ";
    }
    pout() << endl;
  }

  // fineBox.shift(-shift);
  // now, check that cFineFortran and cFine are the same
  // fineBox.coarsen(m_nRefine);
  // for (BoxIterator b(fineBox); b.ok(); ++b)
  //   {
  //     if (cFineFortran(b(),0) != cFine(b(),0))
  //       {
  //         MayDay::Error("Fortran doesn't match C++");
  //       }
  //   }
  // need to shift boxes back to where they were on entry.
  // cFine.shift(-shift/m_nRefine);
#endif

  fineFlux.shiftHalf(a_dir, -sign(a_sd));
}
void LevelFluxRegisterEdge::incrementFine(FArrayBox& a_fineFlux,
                                          Real a_scale,
                                          const DataIndex& a_fineDataIndex,
                                          const Interval& a_srcInterval,
                                          const Interval& a_dstInterval,
                                          int a_dir,
                                          Side::LoHiSide a_sd)
{
  CH_assert(isDefined());
  CH_assert(!a_fineFlux.box().isEmpty());
  CH_assert(a_srcInterval.size() == a_dstInterval.size());
  CH_assert(a_srcInterval.begin() >= 0);
  CH_assert(a_srcInterval.end() < a_fineFlux.nComp());
  CH_assert(a_dstInterval.begin() >= 0);
  CH_assert(a_dstInterval.end() < m_nComp);
  CH_assert(a_dir >= 0);
  CH_assert(a_dir < SpaceDim);
  CH_assert((a_sd == Side::Lo) || (a_sd == Side::Hi));

  // denom is the number of fine faces per coarse face
  // this is intrinsically dimension-dependent
#if (CH_SPACEDIM == 2)
  Real denom = 1;
#elif (CH_SPACEDIM == 3)
  Real denom = m_nRefine;
#else
  // This code doesn't make any sense in 1D, and hasn't been implemented
  // for DIM > 3
  Real denom = -1.0;
  MayDay::Error("LevelFluxRegisterEdge -- bad SpaceDim");
#endif

  Real scale = a_scale/denom;

  // need which fluxbox face we're doing this for
  Box thisBox = a_fineFlux.box();
  int fluxComp = -1;
  for (int sideDir = 0; sideDir < SpaceDim; sideDir++)
  {
    // we do nothing in the direction normal to face
    if (sideDir != a_dir)
    {
      if (thisBox.type(sideDir) == IndexType::CELL)
      {
        fluxComp = sideDir;
      }
    }
  }
  CH_assert(fluxComp >= 0);
  int regcomp = getRegComp(a_dir, fluxComp);

  FluxBox& thisReg = m_fabFine[index(a_dir, a_sd)][a_fineDataIndex];
  FArrayBox& reg = thisReg[regcomp];

  a_fineFlux.shiftHalf(a_dir, sign(a_sd));

  // this is a way of getting a face-centered domain
  // box which we can then use to intersect with things
  // to screen out cells outside the physical domain
  // (nothing is screened out in periodic case)
  Box shiftedValidDomain = m_domainCoarse.domainBox();
  shiftedValidDomain.grow(2);
  shiftedValidDomain &= m_domainCoarse;
  shiftedValidDomain.surroundingNodes(regcomp);

  BoxIterator regIt(reg.box() & shiftedValidDomain);
  for (regIt.begin(); regIt.ok(); ++regIt)
  {
    const IntVect& coarseIndex = regIt();
    // create a cell-centered box, then shift back to face-centered
    Box box(coarseIndex, coarseIndex);
    box.shiftHalf(regcomp, -1);
    // to avoid adding in edges which do not overlie coarse-grid
    // edges, will refine only in non-fluxComp directions to
    // determine box from which to grab fluxes.
    IntVect refineVect(m_nRefine*IntVect::Unit);
    //refineVect.setVal(fluxComp,1);
    box.refine(refineVect);
    if (a_sd == Side::Lo)
      box.growLo(a_dir, -(m_nRefine-1));
    else
      box.growHi(a_dir, -(m_nRefine-1));

    BoxIterator fluxIt(box);
    for (fluxIt.begin(); fluxIt.ok(); ++fluxIt)
    {
      int src = a_srcInterval.begin();
      int dest = a_dstInterval.begin();
      for ( ; src <= a_srcInterval.end(); ++src, ++dest)
        reg(coarseIndex, dest) += scale*a_fineFlux(fluxIt(), src);
    }
  }
  a_fineFlux.shiftHalf(a_dir, -sign(a_sd));
}
//----------------------------------------------------------------------------
void
EBNormalizeByVolumeFraction::
operator()(LevelData<EBCellFAB>& a_Q,
           const Interval& a_compInterval) const
{
  CH_TIME("EBNormalizer::operator()");

  // Endpoints of the given interval.
  int begin = a_compInterval.begin(),
      end = a_compInterval.end(),
      length = a_compInterval.size();

  // Loop over the EBISBoxes within our grid. The EB data structures are
  // indexed in the same manner as the non-EB data structures, so we
  // piggyback the former on the latter.
  a_Q.exchange();
  EBISLayout ebisLayout = m_levelGrid.getEBISL();
  DisjointBoxLayout layout = m_levelGrid.getDBL();
  for (DataIterator dit = layout.dataIterator(); dit.ok(); ++dit)
  {
    const EBISBox& box = ebisLayout[dit()];
    EBCellFAB& QFAB = a_Q[dit()];

    // Go over the irregular cells in this box.
    const IntVectSet& irregCells = box.getIrregIVS(layout[dit()]);

    // The average has to be computed from the uncorrected data from all
    // the neighbors, so we can't apply the corrections in place. For now,
    // we stash them in a map.
    map<VolIndex, vector<Real> > correctedValues;
    for (VoFIterator vit(irregCells, box.getEBGraph()); vit.ok(); ++vit)
    {
      Real kappajSum = 0.0;
      vector<Real> kappajQjSum(length, 0.0);

      // Get all of the indices of the VoFs within a monotone path
      // radius of 1.
      VolIndex vofi = vit();
      Vector<VolIndex> vofjs;
      EBArith::getAllVoFsInMonotonePath(vofjs, vofi, box, 1);

      // Accumulate the contributions from the neighboring cells.
      for (unsigned int j = 0; j < vofjs.size(); ++j)
      {
        VolIndex vofj = vofjs[j];

        Real kappaj = box.volFrac(vofj);
        for (int icomp = begin; icomp <= end; ++icomp)
        {
          kappajQjSum[icomp] += QFAB(vofj, icomp);
        }

        // Add this volume fraction to the sum.
        kappajSum += kappaj;
      }

      if (kappajSum > 0.)
      {
        // Normalize the quantity and stow it.
        vector<Real> correctedValue(length);
        // Real kappai = box.volFrac(vofi); //unused dtg
        for (int icomp = begin; icomp <= end; ++icomp)
        {
          // correctedValue[icomp - begin] =
          //   QFAB(vofi, icomp) + (1.0 - kappai) * kappajQjSum[icomp] / kappajSum;
          correctedValue[icomp - begin] = kappajQjSum[icomp] / kappajSum;
        }
        correctedValues[vofi] = correctedValue;
      }
    }

    // Apply the corrections.
    for (map<VolIndex, vector<Real> >::const_iterator
         cit = correctedValues.begin(); cit != correctedValues.end(); ++cit)
    {
      for (int icomp = begin; icomp <= end; ++icomp)
      {
        QFAB(cit->first, icomp) = cit->second[icomp - begin];
      }
    }
  }
}
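//----------------------------------------------------------------------------
// Illustrative sketch (not from the original sources): the correction that
// operator() above applies to one irregular cell, reduced to scalars.  Given
// the uncorrected values Q_j and volume fractions kappa_j of the cell's
// monotone-path neighbors, the stored value becomes (sum_j Q_j) / (sum_j kappa_j),
// mirroring the kappajQjSum / kappajSum quotient in the code.  All names are
// hypothetical.
#include <vector>

double normalizeIrregularCell(double originalQ,
                              const std::vector<double> &neighborQ,
                              const std::vector<double> &neighborKappa)
{
  double qSum = 0.0, kappaSum = 0.0;
  for (std::size_t j = 0; j < neighborQ.size(); ++j)
  {
    qSum += neighborQ[j];
    kappaSum += neighborKappa[j];
  }
  // As in the code above, the value is left unchanged when no neighbor
  // carries any volume fraction.
  return (kappaSum > 0.) ? qSum / kappaSum : originalQ;
}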
void TimeInterpolatorRK4::intermediate(/// intermediate RK4 solution on this level coarsened
                                       LevelData<FArrayBox>&   a_U,
                                       /// time interpolation coefficient in range [0:1]
                                       const Real&             a_timeInterpCoeff,
                                       /// which RK4 stage:  0, 1, 2, 3
                                       const int&              a_stage,
                                       /// interval of a_U to fill in
                                       const Interval&         a_intvl) const
{
  CH_assert(m_defined);
  CH_assert(m_gotFullTaylorPoly);
  CH_assert(a_U.nComp() == m_numStates);
  CH_assert(a_stage >= 0);
  CH_assert(a_stage < 4);

  Real rinv = 1. / Real(m_refineCoarse);
  Vector<Real> intermCoeffs(4);
  // 0 is coefficient of m_taylorCoeffs[0] = Ucoarse(0)
  // 1 is coefficient of m_taylorCoeffs[1] = K1
  // 2 is coefficient of m_taylorCoeffs[2] = 1/2 * (-3*K1 + 2*K2 + 2*K3 - K4)
  // 3 is coefficient of m_taylorCoeffs[3] = 2/3 * (   K1 -   K2 -   K3 + K4)
  Real diff12Coeffs; // coefficient of m_diff12 = - K2 + K3
  switch (a_stage)
  {
  case 0:
    intermCoeffs[0] = 1.;
    intermCoeffs[1] = a_timeInterpCoeff;
    intermCoeffs[2] = a_timeInterpCoeff * a_timeInterpCoeff;
    intermCoeffs[3] = a_timeInterpCoeff * a_timeInterpCoeff * a_timeInterpCoeff;
    diff12Coeffs = 0.;
    break;
  case 1:
    intermCoeffs[0] = 1.;
    intermCoeffs[1] = 0.5*rinv + a_timeInterpCoeff;
    intermCoeffs[2] = a_timeInterpCoeff * (rinv + a_timeInterpCoeff);
    intermCoeffs[3] = a_timeInterpCoeff * a_timeInterpCoeff * (1.5*rinv + a_timeInterpCoeff);
    diff12Coeffs = 0.;
    break;
  case 2:
    intermCoeffs[0] = 1.;
    intermCoeffs[1] = 0.5*rinv + a_timeInterpCoeff;
    intermCoeffs[2] = 0.5*rinv*rinv + a_timeInterpCoeff * (rinv + a_timeInterpCoeff);
    intermCoeffs[3] = 0.375*rinv*rinv*rinv + a_timeInterpCoeff * (1.5*rinv*rinv + a_timeInterpCoeff * (1.5*rinv + a_timeInterpCoeff));
    diff12Coeffs = -0.25 * rinv * rinv;
    break;
  case 3:
    intermCoeffs[0] = 1.;
    intermCoeffs[1] = rinv + a_timeInterpCoeff;
    intermCoeffs[2] = rinv*rinv + a_timeInterpCoeff * (2.*rinv + a_timeInterpCoeff);
    intermCoeffs[3] = 0.75*rinv*rinv*rinv + a_timeInterpCoeff * (3.*rinv*rinv + a_timeInterpCoeff * (3.*rinv + a_timeInterpCoeff));
    diff12Coeffs = 0.5 * rinv * rinv;
    break;
  default:
    MayDay::Error("TimeInterpolatorRK4::intermediate must have a_stage in range 0:3");
  }

  LevelData<FArrayBox> UComp;
  aliasLevelData(UComp, &a_U, a_intvl);

  // For i in 0:m_numCoeffs-1,
  // coeffFirst[i] is index of first component of m_taylorCoeffs
  // that corresponds to a coefficient of t^i.
  Vector<int> coeffFirst(m_numCoeffs);
  int intervalLength = a_intvl.size();
  for (int i = 0; i < m_numCoeffs; i++)
  {
    coeffFirst[i] = a_intvl.begin() + i * m_numStates;
  }

  DataIterator dit = UComp.dataIterator();
  for (dit.begin(); dit.ok(); ++dit)
  {
    FArrayBox& UFab = UComp[dit];
    const FArrayBox& taylorFab = m_taylorCoeffs[dit];

    // WAS: Evaluate a0 + a1*t + a2*t^2 + a3*t^3
    // as a0 + t * (a1 + t * (a2 + t * a3)):
    // that is, set UFab successively to
    // a3, t*a3 + a2, t*(t*a3 + a2) + a1, t*(t*(t*a3 + a2) + a1) + a0.
    // NEW: Evaluate a0*c0 + a1*c1 + a2*c2 + a3*c3,
    // where c0, c1, c2, c3 are scalars, and c0 = intermCoeffs[0] = 1.
    UFab.copy(taylorFab, coeffFirst[0], 0, intervalLength);
    for (int ind = 1; ind < 4; ind++)
    {
      UFab.plus(taylorFab, intermCoeffs[ind], coeffFirst[ind], 0, intervalLength);
    }

    const FArrayBox& diff12Fab = m_diff12[dit];
    UFab.plus(diff12Fab, diff12Coeffs, 0, 0, intervalLength);
  }
  // dummy statement in order to get around gdb bug
  int dummy_unused = 0; dummy_unused = 0;
}
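// ---------------------------------------------------------
// Illustrative sketch (not from the original sources): the linear
// combination that intermediate() forms FAB-wise, written for a single
// scalar.  With Taylor coefficients a[0..3], stage coefficients c[0..3]
// (c[0] == 1, as in intermCoeffs above), and the -K2 + K3 difference d
// weighted by dCoeff, the result is a0*c0 + a1*c1 + a2*c2 + a3*c3 +
// dCoeff*d, matching the copy/plus sequence in the code.  All names are
// hypothetical.
double evalIntermediate(const double a[4], const double c[4],
                        double d, double dCoeff)
{
  double u = a[0] * c[0];       // c[0] is 1 in the code above
  for (int ind = 1; ind < 4; ++ind)
  {
    u += c[ind] * a[ind];
  }
  u += dCoeff * d;
  return u;
}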
void TraceState(/// state at time t+dt/2 on edges in direction dir
                FArrayBox& a_stateHalf,
                /// cell-centered state at time t
                const FArrayBox& a_state,
                /// cell-centered velocity at time t
                const FArrayBox& a_cellVel,
                /// edge-centered advection velocity at time t+dt/2
                const FluxBox& a_advectionVel,
                /// cell-centered source
                const FArrayBox& a_source,
                /// Physical domain
                const ProblemDomain& a_dProblem,
                /// interior of grid patch
                const Box& a_gridBox,
                /// timeStep
                const Real a_dt,
                /// cell-spacing
                const Real a_dx,
                /// direction in which to perform tracing
                const int a_dir,
                /// which components to trace
                const Interval& a_srcComps,
                /// where to put traced components in a_stateHalf
                const Interval& a_destComps)
{
  int ncomp = a_srcComps.size();
  CH_assert(ncomp == a_destComps.size());
  int offset = a_destComps.begin() - a_srcComps.begin();
  Box edgeBox = a_gridBox;
  edgeBox.surroundingNodes(a_dir);

#ifdef SIMPLEUPWIND
  // simple cell-to-edge averaging may be causing us problems --
  // instead do simple upwinding
  for (int comp = a_srcComps.begin(); comp <= a_srcComps.end(); comp++)
  {
    int destComp = comp + offset;
    FORT_UPWINDCELLTOEDGE(CHF_FRA1(a_stateHalf, destComp),
                          CHF_CONST_FRA1(a_state, comp),
                          CHF_CONST_FRA(a_advectionVel[a_dir]),
                          CHF_BOX(edgeBox),
                          CHF_CONST_INT(a_dir));
  }

#else

  // first compute slopes
  Box slopesBox = grow(a_gridBox, 1);

  // these are debugging changes to sync with old code
#ifdef MATCH_OLDCODE
  FluxBox tempEdgeVel(slopesBox, 1);
  tempEdgeVel.setVal(0.0);

  FArrayBox tempCellVel(slopesBox, SpaceDim);
  tempCellVel.setVal(0.0);

  CellToEdge(a_cellVel, tempEdgeVel);
  EdgeToCell(tempEdgeVel, tempCellVel);
  tempEdgeVel.clear(); // done with this, so reclaim memory
  EdgeToCell(a_advectionVel, tempCellVel);
#endif

  FArrayBox delS(slopesBox, 1);
  FArrayBox sHat(slopesBox, 1);
  FArrayBox sTilde(a_state.box(), 1);

  for (int comp = a_srcComps.begin(); comp <= a_srcComps.end(); comp++)
  {
    int destComp = comp + offset;

#ifdef SET_BOGUS_VALUES
    delS.setVal(BOGUS_VALUE);
    sHat.setVal(BOGUS_VALUE);
    sTilde.setVal(BOGUS_VALUE);
#endif

    // compute van Leer limited slopes in the normal direction
    // for now, always limit slopes, although might want to make
    // this a parameter
    int limitSlopes = 1;
    FORT_SLOPES(CHF_FRA1(delS, 0),
                CHF_CONST_FRA1(a_state, comp),
                CHF_BOX(slopesBox),
                CHF_CONST_INT(a_dir),
                CHF_CONST_INT(limitSlopes));

    // this is a way to incorporate the Minion correction --
    // stateTilde = state + (dt/2)*source
    sTilde.copy(a_source, comp, 0, 1);
    Real sourceFactor = a_dt / 2.0;
    sTilde *= sourceFactor;
    sTilde.plus(a_state, comp, 0, 1);
    sHat.copy(sTilde, slopesBox);

    // now loop over directions, adding transverse components
    for (int localDir = 0; localDir < SpaceDim; localDir++)
    {
      if (localDir != a_dir)
      {
        // add simple transverse components
        FORT_TRANSVERSE(CHF_FRA1(sHat, 0),
                        CHF_CONST_FRA1(sTilde, 0),
#ifdef MATCH_OLDCODE
                        CHF_CONST_FRA1(tempCellVel, localDir),
#else
                        CHF_CONST_FRA1(a_cellVel, localDir),
#endif
                        CHF_BOX(slopesBox),
                        CHF_CONST_REAL(a_dt),
                        CHF_CONST_REAL(a_dx),
                        CHF_CONST_INT(localDir));
                        //CHF_FRA1(temp,0)); // not used in function
      }
      else if (SpaceDim == 3)
      {
        // only add cross derivative transverse piece if we're in 3D
        // note that we need both components of velocity for this one
        FORT_TRANSVERSECROSS(CHF_FRA1(sHat, 0),
                             CHF_CONST_FRA1(sTilde, 0),
                             CHF_CONST_FRA(a_cellVel),
                             CHF_BOX(slopesBox),
                             CHF_CONST_REAL(a_dt),
                             CHF_CONST_REAL(a_dx),
                             CHF_CONST_INT(localDir));
      }
    } // end loop over directions

    // now compute left and right states and resolve the Riemann
    // problem to get single set of edge-centered values
    FORT_PREDICT(CHF_FRA1(a_stateHalf, destComp),
                 CHF_CONST_FRA1(sHat, 0),
                 CHF_CONST_FRA1(delS, 0),
                 CHF_CONST_FRA1(a_cellVel, a_dir),
                 CHF_CONST_FRA1(a_advectionVel[a_dir], 0),
                 CHF_BOX(edgeBox),
                 CHF_CONST_REAL(a_dt),
                 CHF_CONST_REAL(a_dx),
                 CHF_CONST_INT(a_dir));

  } // end loop over components
#endif
}
string ReferenceMap::get_sequence(const Interval& interval, const bool reverse_strand) const
{
  const auto& seq = this->at(interval.chr());
  auto result = seq.substr(interval.start()-1, interval.size());
  return reverse_strand ? utils::complement(result) : result;
}
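// ---------------------------------------------------------
// Illustrative sketch (not from the original sources): the coordinate
// convention get_sequence() above appears to use, assuming intervals are
// 1-based and inclusive on the reference.  A 1-based start maps to a
// 0-based substr() offset of start - 1.  All names are hypothetical.
#include <string>

std::string sliceOneBased(const std::string &chromSeq,
                          std::size_t start1, std::size_t length)
{
  // start1 is 1-based; std::string::substr() expects a 0-based offset.
  return chromSeq.substr(start1 - 1, length);
}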