Пример #1
0
/**
 * Prepare the locally-discovered features for communication to other processors.
 * For each partial feature this trims the halo markings down to the stitch
 * region, records the minimum entity id (a processor-count-independent feature
 * identifier), grows the feature's bounding box over local and halo entities,
 * and appends periodic neighbor node information.
 */
void
FeatureFloodCount::prepareDataForTransfer()
{
  MeshBase & mesh = _mesh.getMesh();

  // Scratch sets reused across all features to avoid reallocating per feature
  std::set<dof_id_type> local_ids_no_ghost, set_difference;

  for (auto & list_ref : _partial_feature_sets)
    for (auto & feature : list_ref)
    {
      /**
       * We need to adjust the halo markings before sending. We need to discard all of the
       * local cell information but not any of the stitch region information. To do that
       * we subtract off the ghosted cells from the local cells and use that in the
       * set difference operation with the halo_ids.
       */
      std::set_difference(feature._local_ids.begin(), feature._local_ids.end(),
                          feature._ghosted_ids.begin(), feature._ghosted_ids.end(),
                          std::inserter(local_ids_no_ghost, local_ids_no_ghost.begin()));

      std::set_difference(feature._halo_ids.begin(), feature._halo_ids.end(),
                          local_ids_no_ghost.begin(), local_ids_no_ghost.end(),
                          std::inserter(set_difference, set_difference.begin()));
      feature._halo_ids.swap(set_difference);
      local_ids_no_ghost.clear();
      set_difference.clear();

      mooseAssert(!feature._local_ids.empty(), "local entity ids cannot be empty");

      // Save off the min entity id present in the feature to uniquely identify the feature
      // regardless of n_procs. std::set is ordered, so begin() is the minimum element.
      feature._min_entity_id = *feature._local_ids.begin();

      for (const auto & entity_id : feature._local_ids)
      {
        /**
         * Update the bounding box.
         *
         * Note: There will always be one and only one bbox while we are building up our
         * data structures because we haven't started to stitch together any regions yet.
         */
        if (_is_elemental)
          feature.updateBBoxExtremes(feature._bboxes[0], *mesh.elem(entity_id));
        else
          feature.updateBBoxExtremes(feature._bboxes[0], mesh.node(entity_id));
      }

      // Now extend the bounding box by the halo region
      for (const auto & halo_id : feature._halo_ids)
      {
        if (_is_elemental)
          feature.updateBBoxExtremes(feature._bboxes[0], *mesh.elem(halo_id));
        else
          feature.updateBBoxExtremes(feature._bboxes[0], mesh.node(halo_id));
      }

      // Periodic node ids
      appendPeriodicNeighborNodes(feature);
    }
}
Пример #2
0
// TODO: Possibly rename this routine
void
FeatureFloodCount::populateDataStructuresFromFloodData()
{
  MeshBase & mesh = _mesh.getMesh();
  processor_id_type n_procs = _app.n_processors();

  for (unsigned int map_num = 0; map_num < _maps_size; ++map_num)
    for (processor_id_type rank = 0; rank < n_procs; ++rank)
      for (std::vector<FeatureData>::iterator it = _partial_feature_sets[rank][map_num].begin();
           it != _partial_feature_sets[rank][map_num].end(); ++it)
      {
        FeatureData & feature = *it;

        for (std::set<dof_id_type>::iterator entity_it = feature._local_ids.begin(); entity_it != feature._local_ids.end(); ++entity_it)
        {
          dof_id_type entity_id = *entity_it;

          // TODO: This may not be good enough for the elemental case
          const Point & entity_point = _is_elemental ? mesh.elem(entity_id)->centroid() : mesh.node(entity_id);

          /**
           * Update the bounding box.
           *
           * Note: There will always be one and only one bbox while we are building up our
           * data structures because we haven't started to stitch together any regions yet.
           */
          feature.updateBBoxMin(feature._bboxes[0], entity_point);
          feature.updateBBoxMax(feature._bboxes[0], entity_point);

          // Save off the min entity id present in the feature to uniquely identify the feature regardless of n_procs
          feature._min_entity_id = std::min(feature._min_entity_id, entity_id);
        }

        // Adjust the halo marking region
        std::set<dof_id_type> set_difference;

        std::set_difference(feature._halo_ids.begin(), feature._halo_ids.end(), feature._local_ids.begin(), feature._local_ids.end(),
                            std::insert_iterator<std::set<dof_id_type> >(set_difference, set_difference.begin()));
        feature._halo_ids.swap(set_difference);

        // Periodic node ids
        appendPeriodicNeighborNodes(feature);
      }
}
Пример #3
0
void
FeatureFloodCount::prepareDataForTransfer()
{
  MeshBase & mesh = _mesh.getMesh();

  for (auto map_num = decltype(_maps_size)(0); map_num < _maps_size; ++map_num)
    for (auto & feature : _partial_feature_sets[map_num])
    {
      for (auto & entity_id : feature._local_ids)
      {
        /**
         * Update the bounding box.
         *
         * Note: There will always be one and only one bbox while we are building up our
         * data structures because we haven't started to stitch together any regions yet.
         */
        if (_is_elemental)
          feature.updateBBoxExtremes(feature._bboxes[0], *mesh.elem(entity_id));
        else
          feature.updateBBoxExtremes(feature._bboxes[0], mesh.node(entity_id));

        // Save off the min entity id present in the feature to uniquely identify the feature regardless of n_procs
        feature._min_entity_id = std::min(feature._min_entity_id, entity_id);
      }

      // Now extend the bounding box by the halo region
      for (auto & halo_id : feature._halo_ids)
      {
        if (_is_elemental)
          feature.updateBBoxExtremes(feature._bboxes[0], *mesh.elem(halo_id));
        else
          feature.updateBBoxExtremes(feature._bboxes[0], mesh.node(halo_id));
      }

      // Periodic node ids
      appendPeriodicNeighborNodes(feature);
    }
}
Пример #4
0
/**
 * Merge overlapping bubble sets within each map. Each map is merged by a single
 * owning processor (chosen round-robin by map_num), then the merged lists are
 * communicated to all other processors. In single-map mode every processor
 * merges locally and no communication is performed.
 *
 * @param use_periodic_boundary_info when true, periodic neighbor nodes are
 *        appended to each BubbleData first and sets that touch only across a
 *        periodic boundary are also merged.
 */
void
FeatureFloodCount::mergeSets(bool use_periodic_boundary_info)
{
  // Time this routine under the MOOSE performance log
  Moose::perf_log.push("mergeSets()", "FeatureFloodCount");
  // Scratch set + inserter reused for every union below to avoid reallocation
  std::set<dof_id_type> set_union;
  std::insert_iterator<std::set<dof_id_type> > set_union_inserter(set_union, set_union.begin());

  /**
   * If map_num <= n_processors (normal case), each processor up to map_num will handle one list
   * of nodes and receive the merged nodes from other processors for all other lists.
   */
  for (unsigned int map_num = 0; map_num < _maps_size; ++map_num)
  {
    unsigned int owner_id = map_num % _app.n_processors();
    if (_single_map_mode || owner_id == processor_id())
    {
      // Get an iterator pointing to the end of the list, we'll reuse it several times in the merge algorithm below
      std::list<BubbleData>::iterator end = _bubble_sets[map_num].end();

      // Next add periodic neighbor information if requested to the BubbleData objects
      if (use_periodic_boundary_info)
        for (std::list<BubbleData>::iterator it = _bubble_sets[map_num].begin(); it != end; ++it)
          appendPeriodicNeighborNodes(*it);

      // Finally start our merge loops
      for (std::list<BubbleData>::iterator it1 = _bubble_sets[map_num].begin(); it1 != end; /* No increment */)
      {
        bool need_it1_increment = true;

        for (std::list<BubbleData>::iterator it2 = it1; it2 != end; ++it2)
        {
          if (it1 != it2 &&                                                               // Make sure that these iterators aren't pointing at the same set
              it1->_var_idx == it2->_var_idx &&                                           // and that the sets have matching variable indices...
              (setsIntersect(it1->_entity_ids.begin(), it1->_entity_ids.end(),            // Do they overlap on the current entity type? OR..
                             it2->_entity_ids.begin(), it2->_entity_ids.end()) ||
                 (use_periodic_boundary_info &&                                           // Are we merging across periodic boundaries? AND
                 setsIntersect(it1->_periodic_nodes.begin(), it1->_periodic_nodes.end(),  // Do they overlap on periodic nodes?
                               it2->_periodic_nodes.begin(), it2->_periodic_nodes.end())
                 )
              )
            )
          {
            // Merge these two entity sets
            set_union.clear();
            std::set_union(it1->_entity_ids.begin(), it1->_entity_ids.end(), it2->_entity_ids.begin(), it2->_entity_ids.end(), set_union_inserter);
            // Put the merged set in the latter iterator so that we'll compare earlier sets to it again
            it2->_entity_ids = set_union;

            // If we are merging periodic boundaries we'll need to merge those nodes too
            if (use_periodic_boundary_info)
            {
              set_union.clear();
              std::set_union(it1->_periodic_nodes.begin(), it1->_periodic_nodes.end(), it2->_periodic_nodes.begin(), it2->_periodic_nodes.end(), set_union_inserter);
              it2->_periodic_nodes = set_union;
            }

            // Now remove the merged set, the one we didn't update (it1).
            // Note: the post-increment advances it1 before erase runs, and
            // std::list::erase only invalidates the erased iterator, so it1 stays valid.
            _bubble_sets[map_num].erase(it1++);
            // don't increment the outer loop since we just deleted it incremented
            need_it1_increment = false;
            // break out of the inner loop and move on
            break;
          }
        }

        if (need_it1_increment)
          ++it1;
      }
    }
  }

  // In multi-map mode, broadcast each merged list from its owner to everyone else
  if (!_single_map_mode)
    for (unsigned int map_num = 0; map_num < _maps_size; ++map_num)
      // Now communicate this list with all the other processors
      communicateOneList(_bubble_sets[map_num], map_num % _app.n_processors(), map_num);

  Moose::perf_log.pop("mergeSets()", "FeatureFloodCount");
}
Пример #5
0
/**
 * Marshal the bubble maps into a flat vector of unsigned ints suitable for
 * inter-processor transfer.
 *
 * Packed layout, per bubble:
 *   [ <i_nodes> <var_idx> <n_0> <n_1> ... <n_i> ... ]
 * i.e. each bubble contributes its node count, its owning variable index, and
 * then its node ids.
 *
 * @param packed_data output buffer; left untouched if already non-empty (see note below)
 * @param merge_periodic_info when true, periodic neighbor nodes are appended to
 *        each bubble's node set before packing
 */
void
NodalFloodCount::pack(std::vector<unsigned int> & packed_data, bool merge_periodic_info) const
{
  /**
   * Don't repack the data if it's already packed - we might lose data that was updated
   * or stored into the packed_data that is not available in the local thread.
   * This happens when we call threadJoin which does not unpack the data on the local thread.
   */
  if (!packed_data.empty())
    return;

  /**
   * We need a data structure that reorganizes the region markings into sets so that we can pack them up
   * in a form to marshall them between processors.  The set of nodes are stored by map_num, region_num.
   **/
  std::vector<std::vector<std::set<unsigned int> > > data(_maps_size);

  for (unsigned int map_num = 0; map_num < _maps_size; ++map_num)
  {
    // +1 because region 0 is reserved for "outside any bubble" (see assert below)
    data[map_num].resize(_region_counts[map_num]+1);

    unsigned int n_periodic_nodes = 0;
    {
      std::map<unsigned int, int>::const_iterator end = _bubble_maps[map_num].end();
      // Reorganize the data by values
      // (invert the node_id -> region_num map into per-region node sets)

      for (std::map<unsigned int, int>::const_iterator it = _bubble_maps[map_num].begin(); it != end; ++it)
        data[map_num][(it->second)].insert(it->first);

      // Append our periodic neighbor nodes to the data structure before packing
      if (merge_periodic_info)
        for (std::vector<std::set<unsigned int> >::iterator it = data[map_num].begin(); it != data[map_num].end(); ++it)
          n_periodic_nodes += appendPeriodicNeighborNodes(*it);

      mooseAssert(_region_counts[map_num]+1 == data[map_num].size(), "Error in packing data");
    }

    {
      /**
       * The size of the packed data structure should be the sum of all of the following:
       * total number of marked nodes
       * the owning variable index for the current bubble
       * inserted periodic neighbor information
       * the number of unique bubbles.
       *
       * We will pack the data into a series of groups representing each unique bubble
       * the nodes for each group will be proceeded by the number of nodes in that group
       * [ <i_nodes> <var_idx> <n_0> <n_1> ... <n_i> <j_nodes> <var_idx> <n_0> <n_1> ... <n_j> ]
       */

      // Note the _region_counts[mar_num]*2 takes into account the number of nodes and the variable index for each region
      std::vector<unsigned int> partial_packed_data(_bubble_maps[map_num].size() + n_periodic_nodes + _region_counts[map_num]*2);

      // Now pack it up
      unsigned int current_idx = 0;

      mooseAssert(data[map_num][0].empty(), "We have nodes marked with zeros - something is not correct");
      // Note: The zeroth "region" is everything outside of a bubble - we don't want to put
      // that into our packed data structure so start at 1 here!
      for (unsigned int i = 1 /* Yes - start at 1 */; i <= _region_counts[map_num]; ++i)
      {
        partial_packed_data[current_idx++] = data[map_num][i].size();     // The number of nodes in the current region

        if (_single_map_mode)
        {
          // In single-map mode all bubbles share one map; recover the owning
          // variable from the region -> variable index lookup table
          mooseAssert(i-1 < _region_to_var_idx.size(), "Index out of bounds in NodalFloodCounter");
          partial_packed_data[current_idx++] = _region_to_var_idx[i-1];   // The variable owning this bubble
        }
        else
          partial_packed_data[current_idx++] = map_num;                   // The variable owning this bubble

        std::set<unsigned int>::iterator end = data[map_num][i].end();
        for (std::set<unsigned int>::iterator it = data[map_num][i].begin(); it != end; ++it)
          partial_packed_data[current_idx++] = *it;                       // The individual node ids
      }

      // Append this map's packed block onto the cumulative output
      packed_data.insert(packed_data.end(), partial_packed_data.begin(), partial_packed_data.end());
    }
  }
}