/*
 **************************************************************************
 * Compute Connector widths that this class requires in order to work
 * properly on a given hierarchy.
 *
 * The only TimeRefinementIntegrator requirement is enough
 * self_connector_widths to fill the tag buffer that the integrator
 * passes to its GriddingAlgorithm.  For some reason, this ghost width
 * was not registered at the time the required Connector widths are
 * computed.  This appeared to be by design (see how it uses
 * GriddingAlgorithm::resetTagBufferingData), so I didn't change it,
 * but it probably should be redesigned.  Filling the tag data ghosts
 * doesn't use recursive refine schedules, so it has no effect on the
 * fine_connector_widths.  --BTNG.
 **************************************************************************
 */
void
TimeRefinementIntegratorConnectorWidthRequestor::computeRequiredConnectorWidths(
   std::vector<hier::IntVector>& self_connector_widths,
   std::vector<hier::IntVector>& fine_connector_widths,
   const hier::PatchHierarchy& patch_hierarchy) const
{
   const tbox::Dimension& dim(patch_hierarchy.getDim());
   const int max_levels(patch_hierarchy.getMaxNumberOfLevels());

   fine_connector_widths.resize(max_levels - 1,
      hier::IntVector::getZero(dim));

   self_connector_widths.clear();
   self_connector_widths.reserve(max_levels);
   for (size_t ln = 0; ln < static_cast<size_t>(max_levels); ++ln) {
      // Use the per-level tag buffer where one is given; reuse the last
      // entry for levels beyond the end of d_tag_buffer.
      hier::IntVector buffer(
         dim,
         d_tag_buffer.size() > ln ? d_tag_buffer[ln] : d_tag_buffer.back());
      self_connector_widths.push_back(buffer);
   }
}
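/*
 * Illustrative sketch (not part of this class): the loop above reuses
 * the last d_tag_buffer entry for any level beyond the end of the
 * buffer.  The standalone snippet below reproduces that clamping with
 * plain integers; the names tag_buffer, max_levels, and
 * selfWidthsFromTagBuffer are hypothetical and exist only for this
 * example.
 *
 *    #include <vector>
 *
 *    std::vector<int> selfWidthsFromTagBuffer(
 *       const std::vector<int>& tag_buffer,   // per-level tag buffer sizes
 *       int max_levels)
 *    {
 *       std::vector<int> widths;
 *       widths.reserve(max_levels);
 *       for (int ln = 0; ln < max_levels; ++ln) {
 *          // Clamp the lookup to the last buffer entry.
 *          widths.push_back(static_cast<size_t>(ln) < tag_buffer.size() ?
 *             tag_buffer[ln] : tag_buffer.back());
 *       }
 *       return widths;
 *    }
 *
 * For example, tag_buffer = {2, 1} with max_levels = 4 yields
 * {2, 1, 1, 1}: every level past the end of the buffer gets the last
 * buffered value, matching the self widths produced above.
 */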
/*
 **************************************************************************
 * Compute Connector widths that this class requires in order to work
 * properly on a given hierarchy.
 **************************************************************************
 */
void
RefineScheduleConnectorWidthRequestor::computeRequiredConnectorWidths(
   std::vector<hier::IntVector>& self_connector_widths,
   std::vector<hier::IntVector>& fine_connector_widths,
   const hier::PatchHierarchy& patch_hierarchy) const
{
   const int max_levels = patch_hierarchy.getMaxNumberOfLevels();
   const tbox::Dimension& dim(patch_hierarchy.getDim());

   /*
    * Add one to the max data ghost width to create overlaps of data
    * living on patch boundaries.
    */
   const hier::IntVector max_data_gcw(
      patch_hierarchy.getPatchDescriptor()->getMaxGhostWidth(dim) + 1);

   hier::IntVector max_stencil_width =
      patch_hierarchy.getGridGeometry()->getMaxTransferOpStencilWidth(dim);
   max_stencil_width.max(
      RefinePatchStrategy::getMaxRefineOpStencilWidth(dim));

   hier::IntVector zero_vector(hier::IntVector::getZero(dim),
                               patch_hierarchy.getNumberBlocks());

   /*
    * Compute the Connector width needed to ensure all edges are found
    * during recursive refine schedule generation.  It is safe to be
    * conservative, but carrying around a larger than necessary width
    * requires more memory and slows down Connector operations.
    *
    * All Connectors to self need to be at least wide enough to
    * support the copy of data from the same level into ghost cells.
    * Thus, the width should be at least the max data ghost width.  On
    * the finest level, there is no other requirement.  For other
    * levels, we need enough width for:
    *
    * - refining the next finer level
    *
    * - refining recursively starting at each of the levels finer than
    *   it.
    */
   hier::IntVector self_width(max_data_gcw * d_gcw_factor,
                              patch_hierarchy.getNumberBlocks());
   self_connector_widths.clear();
   self_connector_widths.resize(max_levels, self_width);

   fine_connector_widths.clear();
   if (max_levels > 1) {
      fine_connector_widths.resize(max_levels - 1,
         zero_vector);   // To be computed below.
   }

   /*
    * Note that the following loop runs from fine to coarse because
    * Connector widths for coarse levels depend on those for finer
    * levels.
    */
   for (int ln = max_levels - 1; ln > -1; --ln) {
      computeRequiredFineConnectorWidthsForRecursiveRefinement(
         fine_connector_widths,
         max_data_gcw,
         max_stencil_width,
         patch_hierarchy,
         ln);
   }
}
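/*
 * Illustrative sketch (an assumption, not the implementation of
 * computeRequiredFineConnectorWidthsForRecursiveRefinement): the loop
 * above runs fine-to-coarse because the width a level must carry
 * toward its finer neighbor depends on what that finer level needs in
 * turn.  A plausible scalar model of that dependency, using the
 * hypothetical names ghost_width, stencil_width, ratio_to_coarser, and
 * fineWidthsFineToCoarse, is:
 *
 *    #include <vector>
 *
 *    // widths[ln] models the width of the Connector from level ln to
 *    // level ln + 1, accumulated from the finest level downward.
 *    std::vector<int> fineWidthsFineToCoarse(
 *       int max_levels,
 *       int ghost_width,       // data ghost width to fill on each level
 *       int stencil_width,     // widest refine-operator stencil
 *       int ratio_to_coarser)  // uniform refinement ratio (assumed)
 *    {
 *       std::vector<int> widths(max_levels > 1 ? max_levels - 1 : 0, 0);
 *       int needed_on_finer = ghost_width;
 *       for (int ln = max_levels - 2; ln >= 0; --ln) {
 *          // Coarsen the finer level's requirement (rounding up) and
 *          // add the stencil needed to interpolate into that region.
 *          const int width =
 *             (needed_on_finer + ratio_to_coarser - 1) / ratio_to_coarser
 *             + stencil_width;
 *          widths[ln] = width;
 *          // The next coarser pass must cover this footprint plus the
 *          // current level's own ghost filling.
 *          needed_on_finer = width + ghost_width;
 *       }
 *       return widths;
 *    }
 *
 * The real computation works per block and per direction with
 * hier::IntVector and per-level refinement ratios; this model only
 * shows why the accumulation has to start at the finest level.
 */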