void
FeatureFloodCount::mergeSets(bool use_periodic_boundary_info)
{
  Moose::perf_log.push("mergeSets()", "FeatureFloodCount");

  std::set<dof_id_type> set_union;

  for (auto map_num = decltype(_maps_size)(0); map_num < _maps_size; ++map_num)
  {
    for (auto it1 = _partial_feature_sets[map_num].begin();
         it1 != _partial_feature_sets[map_num].end();
         /* No increment on it1 */)
    {
      bool merge_occured = false;
      for (auto it2 = _partial_feature_sets[map_num].begin();
           it2 != _partial_feature_sets[map_num].end();
           ++it2)
      {
        bool pb_intersect = false;
        if (it1 != it2 &&                          // Make sure that these iterators aren't pointing at the same set
            it1->_var_idx == it2->_var_idx &&      // and that the sets have matching variable indices
            ((use_periodic_boundary_info &&        // and (if merging across periodic nodes
              (pb_intersect = it1->periodicBoundariesIntersect(*it2))) // do those periodic nodes intersect?
             ||                                    // or
             (it1->boundingBoxesIntersect(*it2) && // if the region bboxes intersect
              it1->ghostedIntersect(*it2))))       // do the ghosted entities also intersect)
        {
          it2->merge(std::move(*it1));

          // Insert the new entity at the end of the list so that it may be checked against all
          // other partial features again
          _partial_feature_sets[map_num].emplace_back(std::move(*it2));

          /**
           * Now remove both halves of the merged features: it2 contains the "moved" feature cell
           * just inserted at the back of the list, it1 contains the mostly empty other half. We
           * have to be careful about the order in which these two elements are deleted. We delete
           * it2 first since we don't care where its iterator points after the deletion; we are
           * going to break out of this loop anyway. If we deleted it1 first, it might end up
           * pointing at the same location as it2, which after the second deletion would leave
           * both iterators invalidated.
           */
          _partial_feature_sets[map_num].erase(it2);
          it1 = _partial_feature_sets[map_num].erase(it1); // it1 is incremented here!

          // A merge occurred; this is used to determine whether or not we increment the outer iterator
          merge_occured = true;

          // We need to start the list comparison over for the new it1, so break here
          break;
        }
      } // it2 loop

      if (!merge_occured) // No merges, so we need to manually increment the outer iterator
        ++it1;
    } // it1 loop
  } // map loop

  /**
   * All of the merges are complete and stored in a vector of lists. To make several
   * of the sorting and tracking algorithms more straightforward, we will move these
   * items into a vector of vectors instead.
   */
  _feature_count = 0;
  for (auto map_num = decltype(_maps_size)(0); map_num < _maps_size; ++map_num)
  {
    for (auto & feature : _partial_feature_sets[map_num])
    {
      // Adjust the halo marking region
      std::set<dof_id_type> set_difference;
      std::set_difference(feature._halo_ids.begin(), feature._halo_ids.end(),
                          feature._local_ids.begin(), feature._local_ids.end(),
                          std::insert_iterator<std::set<dof_id_type>>(set_difference, set_difference.begin()));
      feature._halo_ids.swap(set_difference);

      _feature_sets[map_num].emplace_back(std::move(feature));
      ++_feature_count;
    }

    _partial_feature_sets[map_num].clear();
  }

  Moose::perf_log.pop("mergeSets()", "FeatureFloodCount");
}
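// The halo adjustment above subtracts each feature's local ids from its halo ids with
// std::set_difference before the feature is moved into _feature_sets. The standalone sketch
// below shows just that pattern; the plain unsigned int key type and the sample id values are
// hypothetical stand-ins for the dof_id_type sets held by the feature objects.
#include <algorithm>
#include <iostream>
#include <iterator>
#include <set>

int main()
{
  // Hypothetical stand-ins for feature._halo_ids and feature._local_ids
  std::set<unsigned int> halo_ids{1, 2, 3, 4, 5};
  std::set<unsigned int> local_ids{2, 4};

  // Build halo_ids minus local_ids into a temporary set, then swap it back in,
  // mirroring the "Adjust the halo marking region" step above
  std::set<unsigned int> difference;
  std::set_difference(halo_ids.begin(), halo_ids.end(),
                      local_ids.begin(), local_ids.end(),
                      std::inserter(difference, difference.begin()));
  halo_ids.swap(difference);

  for (auto id : halo_ids)
    std::cout << id << ' '; // prints: 1 3 5
  std::cout << '\n';
}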
void
FeatureFloodCount::mergeSets(bool use_periodic_boundary_info)
{
  Moose::perf_log.push("mergeSets()", "FeatureFloodCount");

  // Since we gathered only on the root process, we only need to merge sets on the root process.
  mooseAssert(_is_master, "mergeSets() should only be called on the root process");

  // Local variable used for sizing structures, it will be >= the actual number of features
  for (auto map_num = decltype(_maps_size)(0); map_num < _maps_size; ++map_num)
  {
    for (auto it1 = _partial_feature_sets[map_num].begin();
         it1 != _partial_feature_sets[map_num].end();
         /* No increment on it1 */)
    {
      bool merge_occured = false;
      for (auto it2 = _partial_feature_sets[map_num].begin();
           it2 != _partial_feature_sets[map_num].end();
           ++it2)
      {
        bool pb_intersect = false;
        if (it1 != it2 &&                          // Make sure that these iterators aren't pointing at the same set
            it1->_var_index == it2->_var_index &&  // and that the sets have matching variable indices
            ((use_periodic_boundary_info &&        // and (if merging across periodic nodes
              (pb_intersect = it1->periodicBoundariesIntersect(*it2))) // do those periodic nodes intersect?
             ||                                    // or
             (it1->boundingBoxesIntersect(*it2) && // if the region bboxes intersect
              it1->ghostedIntersect(*it2))))       // do the ghosted entities also intersect)
        {
          it2->merge(std::move(*it1));

          // Insert the new entity at the end of the list so that it may be checked against all
          // other partial features again
          _partial_feature_sets[map_num].emplace_back(std::move(*it2));

          /**
           * Now remove both halves of the merged features: it2 contains the "moved" feature cell
           * just inserted at the back of the list, it1 contains the mostly empty other half. We
           * have to be careful about the order in which these two elements are deleted. We delete
           * it2 first since we don't care where its iterator points after the deletion; we are
           * going to break out of this loop anyway. If we deleted it1 first, it might end up
           * pointing at the same location as it2, which after the second deletion would leave
           * both iterators invalidated.
           */
          _partial_feature_sets[map_num].erase(it2);
          it1 = _partial_feature_sets[map_num].erase(it1); // it1 is incremented here!

          // A merge occurred; this is used to determine whether or not we increment the outer iterator
          merge_occured = true;

          // We need to start the list comparison over for the new it1, so break here
          break;
        }
      } // it2 loop

      if (!merge_occured) // No merges, so we need to manually increment the outer iterator
        ++it1;
    } // it1 loop
  } // map loop

  /**
   * Now that the merges are complete we need to adjust the centroid and halos.
   * Additionally, to make several of the sorting and tracking algorithms more straightforward,
   * we will move the features into a flat vector. Finally, we can count the final number
   * of features and find the max local index seen on any processor.
   * Note: This is all occurring on rank 0 only!
   */

  // Offset where the current set of features with the same variable id starts in the flat vector
  unsigned int feature_offset = 0;

  // Set the member feature count to zero and start counting the actual features
  _feature_count = 0;
  for (auto map_num = decltype(_maps_size)(0); map_num < _maps_size; ++map_num)
  {
    std::set<dof_id_type> set_difference;
    for (auto & feature : _partial_feature_sets[map_num])
    {
      // First we need to calculate the centroid now that we are done merging all partial features
      if (feature._vol_count != 0)
        feature._centroid /= feature._vol_count;

      _feature_sets.emplace_back(std::move(feature));
      ++_feature_count;
    }

    // Record the feature numbers just for the current map
    _feature_counts_per_map[map_num] = _feature_count - feature_offset;

    // Now update the running feature count so we can calculate the next map's contribution
    feature_offset = _feature_count;

    // Clean up the "moved" objects
    _partial_feature_sets[map_num].clear();
  }

  /**
   * IMPORTANT: FeatureFloodCount::_feature_count is set on rank 0 at this point but
   * we can't broadcast it here because this routine is not collective.
   */

  Moose::perf_log.pop("mergeSets()", "FeatureFloodCount");
}
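// Both versions of mergeSets() rely on std::list iterator guarantees for the pairwise merge:
// the merged feature is re-appended to the back of the list so it gets compared against every
// remaining entry, and the two stale halves are erased with it2 removed before it1. The toy
// sketch below (a hypothetical Blob type, not MOOSE code) illustrates the same loop shape;
// here "mergeable" simply means the id sets intersect and merging is a set union.
#include <iostream>
#include <list>
#include <set>
#include <utility>

struct Blob
{
  std::set<int> ids;

  bool intersects(const Blob & other) const
  {
    for (int id : ids)
      if (other.ids.count(id))
        return true;
    return false;
  }

  void merge(Blob && other) { ids.insert(other.ids.begin(), other.ids.end()); }
};

int main()
{
  std::list<Blob> blobs{{{1, 2}}, {{5, 6}}, {{2, 3}}, {{6, 7}}};

  for (auto it1 = blobs.begin(); it1 != blobs.end(); /* no increment on it1 */)
  {
    bool merge_occurred = false;
    for (auto it2 = blobs.begin(); it2 != blobs.end(); ++it2)
    {
      if (it1 != it2 && it1->intersects(*it2))
      {
        // Merge it1 into it2, then re-append the merged blob so it is checked
        // against all remaining entries on later passes
        it2->merge(std::move(*it1));
        blobs.emplace_back(std::move(*it2));

        // Erase it2 first (we break out of this loop anyway), then it1, which
        // also advances the outer iterator -- the same ordering as mergeSets()
        blobs.erase(it2);
        it1 = blobs.erase(it1);
        merge_occurred = true;
        break;
      }
    }
    if (!merge_occurred)
      ++it1;
  }

  // Prints "1 2 3" and "5 6 7": the four partial blobs collapse into two
  for (const auto & blob : blobs)
  {
    for (int id : blob.ids)
      std::cout << id << ' ';
    std::cout << '\n';
  }
}
// Using std::list (rather than std::vector) is what makes this bookkeeping safe:
// emplace_back and erase never invalidate iterators to the other elements.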