/*
 **************************************************************************
 * Compute Connector widths that this class requires in order to work
 * properly on a given hierarchy.
 *
 * The only TimeRefinementIntegrator requirement is that the
 * self_connector_widths be large enough to fill the tag buffer that
 * the integrator passes to its GriddingAlgorithm.  That ghost width is
 * not yet registered at the time the required Connector widths are
 * computed.  This appears to be by design (see how it uses
 * GriddingAlgorithm::resetTagBufferingData), so I didn't change it,
 * but it probably should be redesigned.  Filling the tag data ghosts
 * doesn't use recursive refine schedules, so it has no effect on the
 * fine_connector_widths.  --BTNG.
 **************************************************************************
 */
void
TimeRefinementIntegratorConnectorWidthRequestor::computeRequiredConnectorWidths(
   std::vector<hier::IntVector>& self_connector_widths,
   std::vector<hier::IntVector>& fine_connector_widths,
   const hier::PatchHierarchy& patch_hierarchy) const
{
   const tbox::Dimension& dim(patch_hierarchy.getDim());
   const int max_levels(patch_hierarchy.getMaxNumberOfLevels());

   fine_connector_widths.resize(max_levels - 1, hier::IntVector::getZero(dim));
   self_connector_widths.clear();
   self_connector_widths.reserve(max_levels);
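   /*
    * Each level's self width must cover its tag buffer.  If the tag
    * buffer has fewer entries than there are levels, reuse its last
    * entry for the remaining levels.
    */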
   for (size_t ln = 0; ln < static_cast<size_t>(max_levels); ++ln) {
      hier::IntVector buffer(
         dim,
         d_tag_buffer.size() > ln ? d_tag_buffer[ln] : d_tag_buffer.back());
      self_connector_widths.push_back(buffer);
   }
}
/*
 **************************************************************************
 * Compute the fine Connector width needed at each coarser level (lnc)
 * to support recursive refinement starting at destination level
 * initial_dst_ln.
 **************************************************************************
 */
void
RefineScheduleConnectorWidthRequestor::computeRequiredFineConnectorWidthsForRecursiveRefinement(
   std::vector<hier::IntVector>& fine_connector_widths,
   const hier::IntVector& data_gcw_on_initial_dst_ln,
   const hier::IntVector& max_stencil_width,
   const hier::PatchHierarchy& patch_hierarchy,
   int initial_dst_ln) const
{
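   /*
    * Pad fine_connector_widths with zero entries, if needed, so that
    * indices 0 through initial_dst_ln - 1 are valid in the loop below.
    */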
   if (static_cast<int>(fine_connector_widths.size()) < initial_dst_ln) {
      fine_connector_widths.insert(
         fine_connector_widths.end(),
         initial_dst_ln - static_cast<int>(fine_connector_widths.size()),
         hier::IntVector(patch_hierarchy.getDim(), 0,
            patch_hierarchy.getGridGeometry()->getNumberBlocks()));
   }

   const size_t nblocks = patch_hierarchy.getGridGeometry()->getNumberBlocks();

   hier::IntVector width_for_refining_recursively(
      data_gcw_on_initial_dst_ln * d_gcw_factor, nblocks);

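   /*
    * Illustration with made-up values: if data_gcw_on_initial_dst_ln
    * is 2, d_gcw_factor is 1, max_stencil_width is 3, and every
    * ratio_to_coarser is 4, then the starting width is 2; the first
    * coarser level needs ceil(2/4) + 3 = 4, the next needs
    * ceil(4/4) + 3 = 4, and so on down the hierarchy.
    */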
   for (int lnc = initial_dst_ln - 1; lnc > -1; --lnc) {

      const hier::IntVector& ratio_to_coarser =
         patch_hierarchy.getRatioToCoarserLevel(lnc + 1);
      width_for_refining_recursively.ceilingDivide(ratio_to_coarser);

      /*
       * Data on the supplemental level in RefineSchedule may have ghost
       * cells as wide as the stencil width.  The coarse-to-fine Connector
       * width must be large enough for the coarse level to bridge to the
       * fine level's supplemental level, which carries the stencil width
       * at the coarse resolution.
       */
      width_for_refining_recursively += max_stencil_width;

      fine_connector_widths[lnc].max(width_for_refining_recursively);
   }

}
/*
 **************************************************************************
 * Compute Connector widths that this class requires in order to work
 * properly on a given hierarchy.
 **************************************************************************
 */
void
RefineScheduleConnectorWidthRequestor::computeRequiredConnectorWidths(
   std::vector<hier::IntVector>& self_connector_widths,
   std::vector<hier::IntVector>& fine_connector_widths,
   const hier::PatchHierarchy& patch_hierarchy) const
{
   const int max_levels = patch_hierarchy.getMaxNumberOfLevels();

   const tbox::Dimension& dim(patch_hierarchy.getDim());

   /*
    * Add one to max data ghost width to create overlaps of data
    * living on patch boundaries.
    */
   const hier::IntVector max_data_gcw(
      patch_hierarchy.getPatchDescriptor()->getMaxGhostWidth(dim) + 1);

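   /*
    * The widest stencil any transfer or refine operator may apply.
    * This is the extra width added for the supplemental data at each
    * step of the recursion below.
    */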
   hier::IntVector max_stencil_width =
      patch_hierarchy.getGridGeometry()->getMaxTransferOpStencilWidth(dim);
   max_stencil_width.max(
      RefinePatchStrategy::getMaxRefineOpStencilWidth(dim));

   hier::IntVector zero_vector(hier::IntVector::getZero(dim),
                               patch_hierarchy.getNumberBlocks());

   /*
    * Compute the Connector width needed to ensure all edges are found
    * during recursive refine schedule generation.  It is safe to be
    * conservative, but carrying around a larger than necessary width
    * requires more memory and slows down Connector operations.
    *
    * All Connectors to self need to be at least wide enough to
    * support copying data from the same level into ghost cells.
    * Thus, the width should be at least the max data ghost width.
    * On the finest level, there is no other requirement.  For all
    * other levels, we need enough width for:
    *
    * - refining the next finer level, and
    *
    * - refining recursively starting at each of the levels finer than
    *   the level in question.
    */

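   /*
    * For example (hypothetical values), a max data ghost width of 4
    * gives max_data_gcw = 5, and with d_gcw_factor = 1 every self
    * Connector width below becomes 5 in each direction.
    */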
   hier::IntVector self_width(max_data_gcw * d_gcw_factor,
                              patch_hierarchy.getNumberBlocks()); 
   self_connector_widths.clear();
   self_connector_widths.resize(max_levels, self_width);

   fine_connector_widths.clear();
   if (max_levels > 1) {
      fine_connector_widths.resize(max_levels - 1, zero_vector); // to be computed below.
   }

   /*
    * Note that the following loop goes from fine to coarse.  This is
    * because Connector widths for coarse levels depend on those for
    * finer levels.
    */
   for (int ln = max_levels - 1; ln > -1; --ln) {
      computeRequiredFineConnectorWidthsForRecursiveRefinement(
         fine_connector_widths,
         max_data_gcw,
         max_stencil_width,
         patch_hierarchy,
         ln);
   }

}