void LocationMap<T>::init(MeshBase & mesh)
{
  // This function must be run on all processors at once
  // for non-serial meshes
  if (!mesh.is_serial())
    parallel_only();

  START_LOG("init()", "LocationMap");

  // Clear the old map
  _map.clear();

  // Cache a bounding box
  _lower_bound.clear();
  _lower_bound.resize(LIBMESH_DIM, std::numeric_limits<Real>::max());

  _upper_bound.clear();
  _upper_bound.resize(LIBMESH_DIM, -std::numeric_limits<Real>::max());

  MeshBase::node_iterator       it  = mesh.nodes_begin();
  const MeshBase::node_iterator end = mesh.nodes_end();

  for (; it != end; ++it)
    {
      Node * node = *it;

      for (unsigned int i=0; i != LIBMESH_DIM; ++i)
        {
          // Expand the bounding box if necessary
          _lower_bound[i] = std::min(_lower_bound[i], (*node)(i));
          _upper_bound[i] = std::max(_upper_bound[i], (*node)(i));
        }
    }

  // On a parallel mesh we might not yet have a full bounding box
  if (!mesh.is_serial())
    {
      CommWorld.min(_lower_bound);
      CommWorld.max(_upper_bound);
    }

  this->fill(mesh);

  STOP_LOG("init()", "LocationMap");
}
void LocationMap<T>::init(MeshBase & mesh)
{
  // This function must be run on all processors at once
  // for non-serial meshes
  if (!mesh.is_serial())
    libmesh_parallel_only(mesh.comm());

  LOG_SCOPE("init()", "LocationMap");

  // Clear the old map
  _map.clear();

  // Cache a bounding box
  _lower_bound.clear();
  _lower_bound.resize(LIBMESH_DIM, std::numeric_limits<Real>::max());

  _upper_bound.clear();
  _upper_bound.resize(LIBMESH_DIM, -std::numeric_limits<Real>::max());

  for (auto & node : mesh.node_ptr_range())
    for (unsigned int i=0; i != LIBMESH_DIM; ++i)
      {
        // Expand the bounding box if necessary
        _lower_bound[i] = std::min(_lower_bound[i], (*node)(i));
        _upper_bound[i] = std::max(_upper_bound[i], (*node)(i));
      }

  // On a parallel mesh we might not yet have a full bounding box
  if (!mesh.is_serial())
    {
      mesh.comm().min(_lower_bound);
      mesh.comm().max(_upper_bound);
    }

  this->fill(mesh);
}
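// --------------------------------------------------------------------------
// Standalone usage sketch (not part of the libMesh sources above).  It
// assumes the LocationMap interface declared in "libmesh/location_maps.h"
// (init() plus a point-keyed find()) and the usual mesh-generation helpers;
// names and signatures may differ slightly between libMesh versions.
#include "libmesh/libmesh.h"
#include "libmesh/replicated_mesh.h"
#include "libmesh/mesh_generation.h"
#include "libmesh/location_maps.h"
#include "libmesh/point.h"

using namespace libMesh;

int main (int argc, char ** argv)
{
  LibMeshInit init (argc, argv);

  // Build a small 2D mesh to index.
  ReplicatedMesh mesh (init.comm());
  MeshTools::Generation::build_square (mesh, 4, 4);

  // Hash every node by its physical location.  init() computes the
  // bounding box (collectively, if the mesh were distributed) and then
  // fills the map.
  LocationMap<Node> node_map;
  node_map.init (mesh);

  // Look a node up again by coordinates; a null return would mean no
  // node lies within the lookup tolerance of that point.
  const Point p = mesh.point(0);
  Node * found = node_map.find (p);
  libmesh_assert (found);

  return 0;
}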
//--------------------------------------------------------------------------
void TopologyMap::init(MeshBase & mesh)
{
  // This function must be run on all processors at once
  // for non-serial meshes
  if (!mesh.is_serial())
    libmesh_parallel_only(mesh.comm());

  START_LOG("init()", "TopologyMap");

  // Clear the old map
  _map.clear();

  this->fill(mesh);

  STOP_LOG("init()", "TopologyMap");
}
void Partitioner::set_parent_processor_ids(MeshBase & mesh)
{
  // Ignore the parameter when !LIBMESH_ENABLE_AMR
  libmesh_ignore(mesh);

  LOG_SCOPE("set_parent_processor_ids()", "Partitioner");

#ifdef LIBMESH_ENABLE_AMR

  // If the mesh is serial we have access to all the elements,
  // in particular all the active ones.  We can therefore set
  // the parent processor ids indirectly through their children, and
  // set the subactive processor ids while examining their active
  // ancestors.
  // By convention a parent is assigned to the minimum processor
  // of all its children, and a subactive is assigned to the processor
  // of its active ancestor.
  if (mesh.is_serial())
    {
      // Loop over all the active elements in the mesh
      MeshBase::element_iterator       it  = mesh.active_elements_begin();
      const MeshBase::element_iterator end = mesh.active_elements_end();

      for ( ; it!=end; ++it)
        {
          Elem * child = *it;

          // First set descendants
          std::vector<const Elem *> subactive_family;
          child->total_family_tree(subactive_family);
          for (unsigned int i = 0; i != subactive_family.size(); ++i)
            const_cast<Elem *>(subactive_family[i])->processor_id() = child->processor_id();

          // Then set ancestors
          Elem * parent = child->parent();

          while (parent)
            {
              // invalidate the parent id, otherwise the min below
              // will not work if the current parent id is less
              // than all the children!
              parent->invalidate_processor_id();

              for (unsigned int c=0; c<parent->n_children(); c++)
                {
                  child = parent->child_ptr(c);
                  libmesh_assert(child);
                  libmesh_assert(!child->is_remote());
                  libmesh_assert_not_equal_to (child->processor_id(), DofObject::invalid_processor_id);
                  parent->processor_id() = std::min(parent->processor_id(),
                                                    child->processor_id());
                }

              parent = parent->parent();
            }
        }
    }

  // When the mesh is parallel we cannot guarantee that parents have access to
  // all their children.
  else
    {
      // Setting subactive processor ids is easy: we can guarantee
      // that children have access to all their parents.

      // Loop over all the active elements in the mesh
      MeshBase::element_iterator       it  = mesh.active_elements_begin();
      const MeshBase::element_iterator end = mesh.active_elements_end();

      for ( ; it!=end; ++it)
        {
          Elem * child = *it;

          std::vector<const Elem *> subactive_family;
          child->total_family_tree(subactive_family);
          for (unsigned int i = 0; i != subactive_family.size(); ++i)
            const_cast<Elem *>(subactive_family[i])->processor_id() = child->processor_id();
        }

      // When the mesh is parallel we cannot guarantee that parents have access to
      // all their children.

      // We will use a brute-force approach here.  Each processor finds its parent
      // elements and sets the parent pid to the minimum of its
      // semilocal descendants.
      // A global reduction is then performed to make sure the true minimum is found.
      // As noted, this is required because we cannot guarantee that a parent has
      // access to all its children on any single processor.
      libmesh_parallel_only(mesh.comm());
      libmesh_assert(MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
                                       mesh.unpartitioned_elements_end()) == 0);

      const dof_id_type max_elem_id = mesh.max_elem_id();

      std::vector<processor_id_type>
        parent_processor_ids (std::min(communication_blocksize,
                                       max_elem_id));

      for (dof_id_type blk=0, last_elem_id=0; last_elem_id<max_elem_id; blk++)
        {
          last_elem_id = std::min(static_cast<dof_id_type>((blk+1)*communication_blocksize),
                                  max_elem_id);
          const dof_id_type first_elem_id = blk*communication_blocksize;

          std::fill (parent_processor_ids.begin(),
                     parent_processor_ids.end(),
                     DofObject::invalid_processor_id);

          // first build up local contributions to parent_processor_ids
          MeshBase::element_iterator       not_it  = mesh.ancestor_elements_begin();
          const MeshBase::element_iterator not_end = mesh.ancestor_elements_end();

          bool have_parent_in_block = false;

          for ( ; not_it != not_end; ++not_it)
            {
              Elem * parent = *not_it;

              const dof_id_type parent_idx = parent->id();
              libmesh_assert_less (parent_idx, max_elem_id);

              if ((parent_idx >= first_elem_id) &&
                  (parent_idx <  last_elem_id))
                {
                  have_parent_in_block = true;
                  processor_id_type parent_pid = DofObject::invalid_processor_id;

                  std::vector<const Elem *> active_family;
                  parent->active_family_tree(active_family);
                  for (unsigned int i = 0; i != active_family.size(); ++i)
                    parent_pid = std::min (parent_pid, active_family[i]->processor_id());

                  const dof_id_type packed_idx = parent_idx - first_elem_id;
                  libmesh_assert_less (packed_idx, parent_processor_ids.size());

                  parent_processor_ids[packed_idx] = parent_pid;
                }
            }

          // then find the global minimum
          mesh.comm().min (parent_processor_ids);

          // and assign the ids, if we have a parent in this block.
          if (have_parent_in_block)
            for (not_it = mesh.ancestor_elements_begin(); not_it != not_end; ++not_it)
              {
                Elem * parent = *not_it;

                const dof_id_type parent_idx = parent->id();

                if ((parent_idx >= first_elem_id) &&
                    (parent_idx <  last_elem_id))
                  {
                    const dof_id_type packed_idx = parent_idx - first_elem_id;
                    libmesh_assert_less (packed_idx, parent_processor_ids.size());

                    const processor_id_type parent_pid =
                      parent_processor_ids[packed_idx];

                    libmesh_assert_not_equal_to (parent_pid, DofObject::invalid_processor_id);

                    parent->processor_id() = parent_pid;
                  }
              }
        }
    }

#endif // LIBMESH_ENABLE_AMR
}
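// --------------------------------------------------------------------------
// Standalone sketch of the "parent pid = minimum over active descendants"
// convention used by set_parent_processor_ids() above.  The TreeElem type
// here is hypothetical (not a libMesh class); it only mimics a refinement
// tree so the serial-branch logic can be checked in isolation.
#include <algorithm>
#include <cassert>
#include <limits>
#include <vector>

struct TreeElem
{
  unsigned int pid = std::numeric_limits<unsigned int>::max(); // "invalid"
  std::vector<TreeElem *> children;
  bool active () const { return children.empty(); }
};

// Recursively assign each non-active element the minimum pid of its
// active descendants, mirroring the serial branch above.
unsigned int set_parent_pids (TreeElem & e)
{
  if (e.active())
    return e.pid;

  unsigned int min_pid = std::numeric_limits<unsigned int>::max();
  for (TreeElem * c : e.children)
    min_pid = std::min(min_pid, set_parent_pids(*c));

  e.pid = min_pid;
  return min_pid;
}

int main ()
{
  // A parent with two active children owned by processors 3 and 1.
  TreeElem c0, c1, parent;
  c0.pid = 3;
  c1.pid = 1;
  parent.children = {&c0, &c1};

  set_parent_pids(parent);
  assert(parent.pid == 1); // the minimum of its children's pids

  return 0;
}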
void MetisPartitioner::partition_range(MeshBase & mesh,
                                       MeshBase::element_iterator beg,
                                       MeshBase::element_iterator end,
                                       unsigned int n_pieces)
{
  libmesh_assert_greater (n_pieces, 0);

  // We don't yet support distributed meshes with this Partitioner
  if (!mesh.is_serial())
    libmesh_not_implemented();

  // Check for an easy return
  if (n_pieces == 1)
    {
      this->single_partition_range (beg, end);
      return;
    }

  // What to do if the Metis library IS NOT present
#ifndef LIBMESH_HAVE_METIS

  libmesh_here();
  libMesh::err << "ERROR: The library has been built without" << std::endl
               << "Metis support.  Using a space-filling curve" << std::endl
               << "partitioner instead!" << std::endl;

  SFCPartitioner sfcp;
  sfcp.partition_range (mesh, beg, end, n_pieces);

  // What to do if the Metis library IS present
#else

  LOG_SCOPE("partition_range()", "MetisPartitioner");

  const dof_id_type n_range_elem = std::distance(beg, end);

  // Metis will only consider the elements in the range.
  // We need to map the range element ids into a
  // contiguous range.  Further, we want the unique range indexing to be
  // independent of the element ordering, otherwise a circular dependency
  // can result in which the partitioning depends on the ordering which
  // depends on the partitioning...
  vectormap<dof_id_type, dof_id_type> global_index_map;

  {
    std::vector<dof_id_type> global_index;

    MeshCommunication().find_global_indices (mesh.comm(),
                                             MeshTools::create_bounding_box(mesh),
                                             beg, end, global_index);

    libmesh_assert_equal_to (global_index.size(), n_range_elem);

    MeshBase::element_iterator it = beg;
    for (std::size_t cnt=0; it != end; ++it)
      {
        const Elem * elem = *it;

        global_index_map.insert (std::make_pair(elem->id(), global_index[cnt++]));
      }
    libmesh_assert_equal_to (global_index_map.size(), n_range_elem);
  }

  // If we have boundary elements in this mesh, we want to account for
  // the connectivity between them and interior elements.  We can find
  // interior elements from boundary elements, but we need to build up
  // a lookup map to do the reverse.
  typedef std::unordered_multimap<const Elem *, const Elem *> map_type;
  map_type interior_to_boundary_map;

  {
    MeshBase::element_iterator it = beg;
    for (; it != end; ++it)
      {
        const Elem * elem = *it;

        // If we don't have an interior_parent then there's nothing
        // to look us up.
        if ((elem->dim() >= LIBMESH_DIM) || !elem->interior_parent())
          continue;

        // get all relevant interior elements
        std::set<const Elem *> neighbor_set;
        elem->find_interior_neighbors(neighbor_set);

        std::set<const Elem *>::iterator n_it = neighbor_set.begin();
        for (; n_it != neighbor_set.end(); ++n_it)
          {
            // FIXME - non-const versions of the std::set<const Elem
            // *> returning methods would be nice
            Elem * neighbor = const_cast<Elem *>(*n_it);

#if defined(LIBMESH_HAVE_UNORDERED_MULTIMAP) ||         \
  defined(LIBMESH_HAVE_TR1_UNORDERED_MULTIMAP) ||       \
  defined(LIBMESH_HAVE_HASH_MULTIMAP) ||                \
  defined(LIBMESH_HAVE_EXT_HASH_MULTIMAP)
            interior_to_boundary_map.insert(std::make_pair(neighbor, elem));
#else
            interior_to_boundary_map.insert(interior_to_boundary_map.begin(),
                                            std::make_pair(neighbor, elem));
#endif
          }
      }
  }

  // Data structure that Metis will fill up on processor 0 and broadcast.
  std::vector<Metis::idx_t> part(n_range_elem);

  // Invoke METIS, but only on processor 0.
  // Then broadcast the resulting decomposition
  if (mesh.processor_id() == 0)
    {
      // Data structures and parameters needed only on processor 0 by Metis.
      // std::vector<Metis::idx_t> options(5);
      std::vector<Metis::idx_t> vwgt(n_range_elem);

      Metis::idx_t
        n = static_cast<Metis::idx_t>(n_range_elem),   // number of "nodes" (elements) in the graph
        // wgtflag = 2,                                // weights on vertices only, none on edges
        // numflag = 0,                                // C-style 0-based numbering
        nparts  = static_cast<Metis::idx_t>(n_pieces), // number of subdomains to create
        edgecut = 0;                                   // the numbers of edges cut by the resulting partition

      // Set the options
      // options[0] = 0; // use default options

      // build the graph
      METIS_CSR_Graph<Metis::idx_t> csr_graph;

      csr_graph.offsets.resize(n_range_elem + 1, 0);

      // Local scope for these
      {
        // build the graph in CSR format.  Note that
        // the edges in the graph will correspond to
        // face neighbors

#ifdef LIBMESH_ENABLE_AMR
        std::vector<const Elem *> neighbors_offspring;
#endif

#ifndef NDEBUG
        std::size_t graph_size=0;
#endif

        // (1) first pass - get the row sizes for each element by counting the number
        //     of face neighbors.  Also populate the vwgt array if necessary
        MeshBase::element_iterator it = beg;
        for (; it != end; ++it)
          {
            const Elem * elem = *it;

            const dof_id_type elem_global_index =
              global_index_map[elem->id()];

            libmesh_assert_less (elem_global_index, vwgt.size());

            // maybe there is a better weight?
            // The weight is used to define what a balanced graph is
            if (!_weights)
              vwgt[elem_global_index] = elem->n_nodes();
            else
              vwgt[elem_global_index] = static_cast<Metis::idx_t>((*_weights)[elem->id()]);

            unsigned int num_neighbors = 0;

            // Loop over the element's neighbors.  An element
            // adjacency corresponds to a face neighbor
            for (auto neighbor : elem->neighbor_ptr_range())
              {
                if (neighbor != libmesh_nullptr)
                  {
                    // If the neighbor is active, but is not in the
                    // range of elements being partitioned, treat it
                    // as a NULL neighbor.
                    if (neighbor->active() && !global_index_map.count(neighbor->id()))
                      continue;

                    // If the neighbor is active treat it
                    // as a connection
                    if (neighbor->active())
                      num_neighbors++;

#ifdef LIBMESH_ENABLE_AMR

                    // Otherwise we need to find all of the
                    // neighbor's children that are connected to
                    // us and add them
                    else
                      {
                        // The side of the neighbor to which
                        // we are connected
                        const unsigned int ns =
                          neighbor->which_neighbor_am_i (elem);
                        libmesh_assert_less (ns, neighbor->n_neighbors());

                        // Get all the active children (& grandchildren, etc...)
                        // of the neighbor.

                        // FIXME - this is the wrong thing, since we
                        // should be getting the active family tree on
                        // our side only.  But adding too many graph
                        // links may cause hanging nodes to tend to be
                        // on partition interiors, which would reduce
                        // communication overhead for constraint
                        // equations, so we'll leave it.
                        neighbor->active_family_tree (neighbors_offspring);

                        // Get all the neighbor's children that
                        // live on that side and are thus connected
                        // to us
                        for (std::size_t nc=0; nc<neighbors_offspring.size(); nc++)
                          {
                            const Elem * child = neighbors_offspring[nc];

                            // Skip neighbor offspring which are not in the range of elements being partitioned.
                            if (!global_index_map.count(child->id()))
                              continue;

                            // This does not assume a level-1 mesh.
                            // Note that since children have sides numbered
                            // coincident with the parent then this is a sufficient test.
                            if (child->neighbor_ptr(ns) == elem)
                              {
                                libmesh_assert (child->active());
                                num_neighbors++;
                              }
                          }
                      }

#endif /* ifdef LIBMESH_ENABLE_AMR */

                  }
              }

            // Check for any interior neighbors
            if ((elem->dim() < LIBMESH_DIM) && elem->interior_parent())
              {
                // get all relevant interior elements
                std::set<const Elem *> neighbor_set;
                elem->find_interior_neighbors(neighbor_set);

                num_neighbors += neighbor_set.size();
              }

            // Check for any boundary neighbors
            typedef map_type::iterator map_it_type;
            std::pair<map_it_type, map_it_type>
              bounds = interior_to_boundary_map.equal_range(elem);
            num_neighbors += std::distance(bounds.first, bounds.second);

            csr_graph.prep_n_nonzeros(elem_global_index, num_neighbors);
#ifndef NDEBUG
            graph_size += num_neighbors;
#endif
          }

        csr_graph.prepare_for_use();

        // (2) second pass - fill the compressed adjacency array
        it = beg;

        for (; it != end; ++it)
          {
            const Elem * elem = *it;

            const dof_id_type elem_global_index =
              global_index_map[elem->id()];

            unsigned int connection=0;

            // Loop over the element's neighbors.  An element
            // adjacency corresponds to a face neighbor
            for (auto neighbor : elem->neighbor_ptr_range())
              {
                if (neighbor != libmesh_nullptr)
                  {
                    // If the neighbor is active, but is not in the
                    // range of elements being partitioned, treat it
                    // as a NULL neighbor.
                    if (neighbor->active() && !global_index_map.count(neighbor->id()))
                      continue;

                    // If the neighbor is active treat it
                    // as a connection
                    if (neighbor->active())
                      csr_graph(elem_global_index, connection++) = global_index_map[neighbor->id()];

#ifdef LIBMESH_ENABLE_AMR

                    // Otherwise we need to find all of the
                    // neighbor's children that are connected to
                    // us and add them
                    else
                      {
                        // The side of the neighbor to which
                        // we are connected
                        const unsigned int ns =
                          neighbor->which_neighbor_am_i (elem);
                        libmesh_assert_less (ns, neighbor->n_neighbors());

                        // Get all the active children (& grandchildren, etc...)
                        // of the neighbor.
                        neighbor->active_family_tree (neighbors_offspring);

                        // Get all the neighbor's children that
                        // live on that side and are thus connected
                        // to us
                        for (std::size_t nc=0; nc<neighbors_offspring.size(); nc++)
                          {
                            const Elem * child = neighbors_offspring[nc];

                            // Skip neighbor offspring which are not in the range of elements being partitioned.
                            if (!global_index_map.count(child->id()))
                              continue;

                            // This does not assume a level-1 mesh.
                            // Note that since children have sides numbered
                            // coincident with the parent then this is a sufficient test.
                            if (child->neighbor_ptr(ns) == elem)
                              {
                                libmesh_assert (child->active());
                                csr_graph(elem_global_index, connection++) = global_index_map[child->id()];
                              }
                          }
                      }

#endif /* ifdef LIBMESH_ENABLE_AMR */

                  }
              }

            if ((elem->dim() < LIBMESH_DIM) &&
                elem->interior_parent())
              {
                // get all relevant interior elements
                std::set<const Elem *> neighbor_set;
                elem->find_interior_neighbors(neighbor_set);

                std::set<const Elem *>::iterator n_it = neighbor_set.begin();
                for (; n_it != neighbor_set.end(); ++n_it)
                  {
                    const Elem * neighbor = *n_it;

                    // Not all interior neighbors are necessarily in
                    // the same Mesh (hence not in the global_index_map).
                    // This will be the case when partitioning a
                    // BoundaryMesh, whose elements all have
                    // interior_parents() that belong to some other
                    // Mesh.
                    const Elem * queried_elem = mesh.query_elem_ptr(neighbor->id());

                    // Compare the neighbor and the queried_elem
                    // pointers, make sure they are the same.
                    if (queried_elem && queried_elem == neighbor)
                      {
                        vectormap<dof_id_type, dof_id_type>::iterator global_index_map_it =
                          global_index_map.find(neighbor->id());

                        // If the interior_neighbor is in the Mesh but
                        // not in the global_index_map, we have other issues.
                        if (global_index_map_it == global_index_map.end())
                          libmesh_error_msg("Interior neighbor with id " << neighbor->id() << " not found in global_index_map.");

                        else
                          csr_graph(elem_global_index, connection++) = global_index_map_it->second;
                      }
                  }
              }

            // Check for any boundary neighbors
            for (const auto & pr : as_range(interior_to_boundary_map.equal_range(elem)))
              {
                const Elem * neighbor = pr.second;
                csr_graph(elem_global_index, connection++) =
                  global_index_map[neighbor->id()];
              }
          }

        // We create a non-empty vals for a disconnected graph, to
        // work around a segfault from METIS.
        libmesh_assert_equal_to (csr_graph.vals.size(),
                                 std::max(graph_size, std::size_t(1)));
      } // done building the graph

      Metis::idx_t ncon = 1;

      // Select which type of partitioning to create

      // Use recursive if the number of partitions is less than or equal to 8
      if (n_pieces <= 8)
        Metis::METIS_PartGraphRecursive(&n,
                                        &ncon,
                                        &csr_graph.offsets[0],
                                        &csr_graph.vals[0],
                                        &vwgt[0],
                                        libmesh_nullptr,
                                        libmesh_nullptr,
                                        &nparts,
                                        libmesh_nullptr,
                                        libmesh_nullptr,
                                        libmesh_nullptr,
                                        &edgecut,
                                        &part[0]);

      // Otherwise use kway
      else
        Metis::METIS_PartGraphKway(&n,
                                   &ncon,
                                   &csr_graph.offsets[0],
                                   &csr_graph.vals[0],
                                   &vwgt[0],
                                   libmesh_nullptr,
                                   libmesh_nullptr,
                                   &nparts,
                                   libmesh_nullptr,
                                   libmesh_nullptr,
                                   libmesh_nullptr,
                                   &edgecut,
                                   &part[0]);

    } // end processor 0 part

  // Broadcast the resulting partition
  mesh.comm().broadcast(part);

  // Assign the returned processor ids.  The part array contains
  // the processor id for each active element, but in terms of
  // the contiguous indexing we defined above
  {
    MeshBase::element_iterator it = beg;
    for (; it!=end; ++it)
      {
        Elem * elem = *it;

        libmesh_assert (global_index_map.count(elem->id()));

        const dof_id_type elem_global_index =
          global_index_map[elem->id()];

        libmesh_assert_less (elem_global_index, part.size());

        const processor_id_type elem_procid =
          static_cast<processor_id_type>(part[elem_global_index]);

        elem->processor_id() = elem_procid;
      }
  }
#endif
}
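// --------------------------------------------------------------------------
// Minimal driver sketch (not part of the libMesh sources above).  It assumes
// a libMesh build with METIS support and the standard MetisPartitioner /
// mesh-generation interfaces; headers and names may vary between versions.
#include "libmesh/libmesh.h"
#include "libmesh/replicated_mesh.h"
#include "libmesh/mesh_generation.h"
#include "libmesh/metis_partitioner.h"
#include "libmesh/elem.h"

using namespace libMesh;

int main (int argc, char ** argv)
{
  LibMeshInit init (argc, argv);

  // A serial (replicated) mesh, since this Partitioner does not yet
  // support distributed meshes.
  ReplicatedMesh mesh (init.comm());
  MeshTools::Generation::build_square (mesh, 20, 20);

  // Split the active elements into 4 pieces; each element's
  // processor_id() is overwritten with its assigned piece.
  MetisPartitioner metis;
  metis.partition (mesh, 4);

  for (const auto & elem : mesh.active_element_ptr_range())
    libmesh_assert_less (elem->processor_id(), 4);

  return 0;
}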
void CentroidPartitioner::partition_range(MeshBase & mesh,
                                          MeshBase::element_iterator it,
                                          MeshBase::element_iterator end,
                                          unsigned int n)
{
  // Check for an easy return
  if (n == 1)
    {
      this->single_partition_range (it, end);
      return;
    }

  // We don't yet support distributed meshes with this Partitioner
  if (!mesh.is_serial())
    libmesh_not_implemented();

  // Compute the element centroids.  Note: we used to skip this step
  // if the number of elements was unchanged from the last call, but
  // that doesn't account for elements that have moved a lot since the
  // last time the Partitioner was called...
  this->compute_centroids (it, end);

  switch (this->sort_method())
    {
    case X:
      {
        std::sort(_elem_centroids.begin(),
                  _elem_centroids.end(),
                  CentroidPartitioner::sort_x);
        break;
      }

    case Y:
      {
        std::sort(_elem_centroids.begin(),
                  _elem_centroids.end(),
                  CentroidPartitioner::sort_y);
        break;
      }

    case Z:
      {
        std::sort(_elem_centroids.begin(),
                  _elem_centroids.end(),
                  CentroidPartitioner::sort_z);
        break;
      }

    case RADIAL:
      {
        std::sort(_elem_centroids.begin(),
                  _elem_centroids.end(),
                  CentroidPartitioner::sort_radial);
        break;
      }

    default:
      libmesh_error_msg("Unknown sort method: " << this->sort_method());
    }

  // Make sure the user has not handed us an
  // invalid number of partitions.
  libmesh_assert_greater (n, 0);

  // Compute target_size, the approximate number of elements on each processor.
  const dof_id_type target_size = _elem_centroids.size() / n;

  for (dof_id_type i=0; i<_elem_centroids.size(); i++)
    {
      Elem * elem = _elem_centroids[i].second;

      // FIXME: All "extra" elements go on the last processor... this
      // could probably be improved.
      elem->processor_id() =
        std::min (cast_int<processor_id_type>(i / target_size),
                  cast_int<processor_id_type>(n-1));
    }
}
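// --------------------------------------------------------------------------
// Standalone check of the chunking arithmetic used in the final loop above:
// the sorted centroid list is divided into contiguous chunks of target_size
// elements and any remainder lands on the last piece.  Hypothetical element
// and piece counts; no libMesh types are used.
#include <algorithm>
#include <cassert>

int main ()
{
  const unsigned int n_elem = 10, n_pieces = 4;
  const unsigned int target_size = n_elem / n_pieces; // == 2

  for (unsigned int i = 0; i < n_elem; ++i)
    {
      // Same formula as the elem->processor_id() assignment above.
      const unsigned int pid = std::min(i / target_size, n_pieces - 1);

      // Elements 0-1 -> piece 0, 2-3 -> piece 1, 4-5 -> piece 2, and the
      // remaining four elements 6-9 all land on the last piece, 3.
      assert(pid == std::min(i / 2u, 3u));
    }

  return 0;
}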
// ------------------------------------------------------------
// MetisPartitioner implementation
void MetisPartitioner::_do_partition (MeshBase & mesh,
                                      const unsigned int n_pieces)
{
  libmesh_assert_greater (n_pieces, 0);
  libmesh_assert (mesh.is_serial());

  // Check for an easy return
  if (n_pieces == 1)
    {
      this->single_partition (mesh);
      return;
    }

  // What to do if the Metis library IS NOT present
#ifndef LIBMESH_HAVE_METIS

  libmesh_here();
  libMesh::err << "ERROR: The library has been built without" << std::endl
               << "Metis support.  Using a space-filling curve" << std::endl
               << "partitioner instead!" << std::endl;

  SFCPartitioner sfcp;
  sfcp.partition (mesh, n_pieces);

  // What to do if the Metis library IS present
#else

  START_LOG("partition()", "MetisPartitioner");

  const dof_id_type n_active_elem = mesh.n_active_elem();

  // build the graph
  // std::vector<int> options(5);
  std::vector<int> vwgt(n_active_elem);
  std::vector<int> part(n_active_elem);

  int
    n = static_cast<int>(n_active_elem),  // number of "nodes" (elements)
                                          // in the graph
    // wgtflag = 2,                       // weights on vertices only,
    //                                    // none on edges
    // numflag = 0,                       // C-style 0-based numbering
    nparts  = static_cast<int>(n_pieces), // number of subdomains to create
    edgecut = 0;                          // the numbers of edges cut by the
                                          // resulting partition

  // Set the options
  // options[0] = 0; // use default options

  // Metis will only consider the active elements.
  // We need to map the active element ids into a
  // contiguous range.  Further, we want the unique range indexing to be
  // independent of the element ordering, otherwise a circular dependency
  // can result in which the partitioning depends on the ordering which
  // depends on the partitioning...
  std::map<const Elem *, dof_id_type> global_index_map;
  {
    std::vector<dof_id_type> global_index;
    MeshBase::element_iterator       it  = mesh.active_elements_begin();
    const MeshBase::element_iterator end = mesh.active_elements_end();

    MeshCommunication().find_global_indices (MeshTools::bounding_box(mesh),
                                             it, end, global_index);

    libmesh_assert_equal_to (global_index.size(), n_active_elem);

    for (std::size_t cnt=0; it != end; ++it)
      {
        const Elem * elem = *it;
        libmesh_assert (!global_index_map.count(elem));

        global_index_map[elem] = global_index[cnt++];
      }
    libmesh_assert_equal_to (global_index_map.size(), n_active_elem);
  }

  // build the graph in CSR format.  Note that
  // the edges in the graph will correspond to
  // face neighbors
  std::vector<int> xadj, adjncy;
  {
    std::vector<const Elem *> neighbors_offspring;

    MeshBase::element_iterator       elem_it  = mesh.active_elements_begin();
    const MeshBase::element_iterator elem_end = mesh.active_elements_end();

    // This will be exact when there is no refinement and all the
    // elements are of the same type.
    std::size_t graph_size=0;

    std::vector<std::vector<dof_id_type> > graph(n_active_elem);

    for (; elem_it != elem_end; ++elem_it)
      {
        const Elem * elem = *elem_it;

        libmesh_assert (global_index_map.count(elem));

        const dof_id_type elem_global_index =
          global_index_map[elem];

        libmesh_assert_less (elem_global_index, vwgt.size());
        libmesh_assert_less (elem_global_index, graph.size());

        // maybe there is a better weight?
        // The weight is used to define what a balanced graph is
        if (!_weights)
          vwgt[elem_global_index] = elem->n_nodes();
        else
          vwgt[elem_global_index] = static_cast<int>((*_weights)[elem->id()]);

        // Loop over the element's neighbors.  An element
        // adjacency corresponds to a face neighbor
        for (unsigned int ms=0; ms<elem->n_neighbors(); ms++)
          {
            const Elem * neighbor = elem->neighbor(ms);

            if (neighbor != NULL)
              {
                // If the neighbor is active treat it
                // as a connection
                if (neighbor->active())
                  {
                    libmesh_assert (global_index_map.count(neighbor));

                    const dof_id_type neighbor_global_index =
                      global_index_map[neighbor];

                    graph[elem_global_index].push_back(neighbor_global_index);
                    graph_size++;
                  }

#ifdef LIBMESH_ENABLE_AMR

                // Otherwise we need to find all of the
                // neighbor's children that are connected to
                // us and add them
                else
                  {
                    // The side of the neighbor to which
                    // we are connected
                    const unsigned int ns =
                      neighbor->which_neighbor_am_i (elem);
                    libmesh_assert_less (ns, neighbor->n_neighbors());

                    // Get all the active children (& grandchildren, etc...)
                    // of the neighbor.
                    neighbor->active_family_tree (neighbors_offspring);

                    // Get all the neighbor's children that
                    // live on that side and are thus connected
                    // to us
                    for (unsigned int nc=0; nc<neighbors_offspring.size(); nc++)
                      {
                        const Elem * child = neighbors_offspring[nc];

                        // This does not assume a level-1 mesh.
                        // Note that since children have sides numbered
                        // coincident with the parent then this is a sufficient test.
                        if (child->neighbor(ns) == elem)
                          {
                            libmesh_assert (child->active());
                            libmesh_assert (global_index_map.count(child));

                            const dof_id_type child_global_index =
                              global_index_map[child];

                            graph[elem_global_index].push_back(child_global_index);
                            graph_size++;
                          }
                      }
                  }

#endif /* ifdef LIBMESH_ENABLE_AMR */

              }
          }
      }

    // Convert the graph into the format Metis wants
    xadj.reserve(n_active_elem+1);
    adjncy.reserve(graph_size);

    for (std::size_t r=0; r<graph.size(); r++)
      {
        xadj.push_back(adjncy.size());
        std::vector<dof_id_type> graph_row; // build this empty
        graph_row.swap(graph[r]); // this will deallocate at the end of scope
        adjncy.insert(adjncy.end(),
                      graph_row.begin(),
                      graph_row.end());
      }

    // The end of the adjacency array for the last elem
    xadj.push_back(adjncy.size());

    libmesh_assert_equal_to (adjncy.size(), graph_size);
    libmesh_assert_equal_to (xadj.size(), n_active_elem+1);
  } // done building the graph

  if (adjncy.empty())
    adjncy.push_back(0);

  int ncon = 1;

  // Select which type of partitioning to create

  // Use recursive if the number of partitions is less than or equal to 8
  if (n_pieces <= 8)
    Metis::METIS_PartGraphRecursive(&n, &ncon, &xadj[0], &adjncy[0], &vwgt[0],
                                    NULL, NULL, &nparts, NULL, NULL, NULL,
                                    &edgecut, &part[0]);

  // Otherwise use kway
  else
    Metis::METIS_PartGraphKway(&n, &ncon, &xadj[0], &adjncy[0], &vwgt[0],
                               NULL, NULL, &nparts, NULL, NULL, NULL,
                               &edgecut, &part[0]);

  // Assign the returned processor ids.  The part array contains
  // the processor id for each active element, but in terms of
  // the contiguous indexing we defined above
  {
    MeshBase::element_iterator       it  = mesh.active_elements_begin();
    const MeshBase::element_iterator end = mesh.active_elements_end();

    for (; it!=end; ++it)
      {
        Elem * elem = *it;

        libmesh_assert (global_index_map.count(elem));

        const dof_id_type elem_global_index =
          global_index_map[elem];

        libmesh_assert_less (elem_global_index, part.size());

        const processor_id_type elem_procid =
          static_cast<processor_id_type>(part[elem_global_index]);

        elem->processor_id() = elem_procid;
      }
  }

  STOP_LOG("partition()", "MetisPartitioner");
#endif
}
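// --------------------------------------------------------------------------
// Standalone illustration of the CSR (xadj/adjncy) layout handed to METIS
// above: xadj[i]..xadj[i+1] brackets element i's slice of the flat adjncy
// array.  A hypothetical 1D chain of three elements 0 - 1 - 2 is used; no
// libMesh or METIS calls are made here.
#include <cassert>
#include <vector>

int main ()
{
  // Element 0 touches {1}, element 1 touches {0, 2}, element 2 touches {1}.
  const std::vector<int> adjncy = {1, 0, 2, 1};

  // Row offsets into adjncy, one entry per element plus a final sentinel.
  const std::vector<int> xadj = {0, 1, 3, 4};

  // Element 1's neighbors occupy adjncy[1] and adjncy[2].
  assert(xadj[2] - xadj[1] == 2);
  assert(adjncy[xadj[1]] == 0 && adjncy[xadj[1] + 1] == 2);

  return 0;
}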