void Partitioner::partition_unpartitioned_elements (MeshBase & mesh,
                                                    const unsigned int n_subdomains)
{
  MeshBase::element_iterator       it  = mesh.unpartitioned_elements_begin();
  const MeshBase::element_iterator end = mesh.unpartitioned_elements_end();

  const dof_id_type n_unpartitioned_elements = MeshTools::n_elem (it, end);

  // The unpartitioned elements must exist on all processors.  If the range is empty on one
  // it is empty on all, and we can quit right here.
  if (!n_unpartitioned_elements)
    return;

  // Find the target subdomain sizes.
  std::vector<dof_id_type> subdomain_bounds(mesh.n_processors());

  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
    {
      dof_id_type tgt_subdomain_size = 0;

      // Watch out for the case that n_subdomains < n_processors.
      if (pid < n_subdomains)
        {
          tgt_subdomain_size = n_unpartitioned_elements/n_subdomains;

          if (pid < n_unpartitioned_elements%n_subdomains)
            tgt_subdomain_size++;
        }

      //libMesh::out << "pid, #= " << pid << ", " << tgt_subdomain_size << std::endl;
      if (pid == 0)
        subdomain_bounds[0] = tgt_subdomain_size;
      else
        subdomain_bounds[pid] = subdomain_bounds[pid-1] + tgt_subdomain_size;
    }

  libmesh_assert_equal_to (subdomain_bounds.back(), n_unpartitioned_elements);

  // Create the unique mapping for all unpartitioned elements independent of partitioning:
  // determine the global indexing for all the unpartitioned elements.
  std::vector<dof_id_type> global_indices;

  // By calling this on all processors, a unique range in [0,n_unpartitioned_elements) is constructed.
  // Only the indices for the elements we pass in are returned in the array.
  MeshCommunication().find_global_indices (mesh.comm(),
                                           MeshTools::bounding_box(mesh), it, end,
                                           global_indices);

  for (dof_id_type cnt=0; it != end; ++it)
    {
      Elem * elem = *it;

      libmesh_assert_less (cnt, global_indices.size());
      const dof_id_type global_index =
        global_indices[cnt++];

      libmesh_assert_less (global_index, subdomain_bounds.back());
      libmesh_assert_less (global_index, n_unpartitioned_elements);

      const processor_id_type subdomain_id =
        cast_int<processor_id_type>
        (std::distance(subdomain_bounds.begin(),
                       std::upper_bound(subdomain_bounds.begin(),
                                        subdomain_bounds.end(),
                                        global_index)));
      libmesh_assert_less (subdomain_id, n_subdomains);

      elem->processor_id() = subdomain_id;
      //libMesh::out << "assigning " << global_index << " to " << subdomain_id << std::endl;
    }
}
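// ------------------------------------------------------------
// Illustrative sketch (not part of libMesh; names are hypothetical): how the
// cumulative subdomain_bounds array above maps a global element index to a
// subdomain id via std::upper_bound.  With 10 elements split over 3
// subdomains the bounds are {4, 7, 10}: indices 0-3 land in subdomain 0,
// 4-6 in subdomain 1, and 7-9 in subdomain 2.  Kept in #if 0 so it never
// affects compilation.
#if 0
#include <algorithm>
#include <cassert>
#include <vector>

unsigned int subdomain_of (unsigned int global_index,
                           const std::vector<unsigned int> & subdomain_bounds)
{
  // upper_bound returns the first bound strictly greater than the index,
  // so an index equal to a bound falls into the *next* subdomain.
  return static_cast<unsigned int>
    (std::distance(subdomain_bounds.begin(),
                   std::upper_bound(subdomain_bounds.begin(),
                                    subdomain_bounds.end(),
                                    global_index)));
}

void example_subdomain_lookup ()
{
  const std::vector<unsigned int> bounds = {4, 7, 10}; // 10 elements, 3 subdomains

  assert (subdomain_of(3, bounds) == 0);
  assert (subdomain_of(4, bounds) == 1);
  assert (subdomain_of(9, bounds) == 2);
}
#endif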
void ParmetisPartitioner::initialize (const MeshBase & mesh,
                                      const unsigned int n_sbdmns)
{
  const dof_id_type n_active_local_elem = mesh.n_active_local_elem();

  // Set parameters.
  _pmetis->wgtflag = 2;                                      // weights on vertices only
  _pmetis->ncon    = 1;                                      // one weight per vertex
  _pmetis->numflag = 0;                                      // C-style 0-based numbering
  _pmetis->nparts  = static_cast<Parmetis::idx_t>(n_sbdmns); // number of subdomains to create
  _pmetis->edgecut = 0;                                      // the number of edges cut by the
                                                             // partition

  // Initialize data structures for ParMETIS
  _pmetis->vtxdist.resize (mesh.n_processors()+1);
  std::fill (_pmetis->vtxdist.begin(), _pmetis->vtxdist.end(), 0);
  _pmetis->tpwgts.resize  (_pmetis->nparts);
  std::fill (_pmetis->tpwgts.begin(), _pmetis->tpwgts.end(), 1./_pmetis->nparts);
  _pmetis->ubvec.resize   (_pmetis->ncon);
  std::fill (_pmetis->ubvec.begin(), _pmetis->ubvec.end(), 1.05);
  _pmetis->part.resize    (n_active_local_elem);
  std::fill (_pmetis->part.begin(), _pmetis->part.end(), 0);
  _pmetis->options.resize (5);
  _pmetis->vwgt.resize    (n_active_local_elem);

  // Set the options
  _pmetis->options[0] = 1;  // don't use default options
  _pmetis->options[1] = 0;  // default (level of timing)
  _pmetis->options[2] = 15; // random seed (default)
  _pmetis->options[3] = 2;  // processor distribution and subdomain distribution are decoupled

  // Find the number of active elements on each processor.  We cannot use
  // mesh.n_active_elem_on_proc(pid) since that only returns the number of
  // elements assigned to pid which are currently stored on the calling
  // processor.  This will not in general be correct for parallel meshes
  // when (pid!=mesh.processor_id()).
  _n_active_elem_on_proc.resize(mesh.n_processors());
  mesh.comm().allgather(n_active_local_elem, _n_active_elem_on_proc);

  // Count the total number of active elements in the mesh.  Note we cannot
  // use mesh.n_active_elem() in general since this only returns the number
  // of active elements which are stored on the calling processor.
  // We should not use n_active_elem for any allocation because that will
  // be inherently unscalable, but it can be useful for libmesh_assertions.
  dof_id_type n_active_elem=0;

  // Set up the vtxdist array.  This will be the same on each processor.
  // ***** Consult the Parmetis documentation. *****
  libmesh_assert_equal_to (_pmetis->vtxdist.size(),
                           cast_int<std::size_t>(mesh.n_processors()+1));
  libmesh_assert_equal_to (_pmetis->vtxdist[0], 0);

  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
    {
      _pmetis->vtxdist[pid+1] = _pmetis->vtxdist[pid] + _n_active_elem_on_proc[pid];
      n_active_elem += _n_active_elem_on_proc[pid];
    }
  libmesh_assert_equal_to (_pmetis->vtxdist.back(),
                           static_cast<Parmetis::idx_t>(n_active_elem));

  // ParMetis expects the elements to be numbered in contiguous blocks
  // by processor, i.e. [0, ne0), [ne0, ne0+ne1), ...
  // Since we only partition active elements we should have no expectation
  // that we currently have such a distribution.  So we need to create it.
  // Also, at the same time we are going to map all the active elements into a globally
  // unique range [0,n_active_elem) which is *independent* of the current partitioning.
  // This can be fed to ParMetis as the initial partitioning of the subdomains (decoupled
  // from the partitioning of the objects themselves).  This allows us to get the same
  // resultant partitioning independent of the input partitioning.
  MeshTools::BoundingBox bbox =
    MeshTools::bounding_box(mesh);

  _global_index_by_pid_map.clear();

  // Maps active element ids into a contiguous range independent of partitioning.
  // (only needs local scope)
  vectormap<dof_id_type, dof_id_type> global_index_map;

  {
    std::vector<dof_id_type> global_index;

    // Create the mapping which is contiguous by processor.
    dof_id_type pid_offset=0;
    for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
      {
        MeshBase::const_element_iterator       it  = mesh.active_pid_elements_begin(pid);
        const MeshBase::const_element_iterator end = mesh.active_pid_elements_end(pid);

        // Note that we may not have all (or any!) the active elements which belong on this processor,
        // but by calling this on all processors a unique range in [0,_n_active_elem_on_proc[pid])
        // is constructed.  Only the indices for the elements we pass in are returned in the array.
        MeshCommunication().find_global_indices (mesh.comm(),
                                                 bbox, it, end,
                                                 global_index);

        for (dof_id_type cnt=0; it != end; ++it)
          {
            const Elem * elem = *it;
            libmesh_assert (!_global_index_by_pid_map.count(elem->id()));
            libmesh_assert_less (cnt, global_index.size());
            libmesh_assert_less (global_index[cnt], _n_active_elem_on_proc[pid]);

            _global_index_by_pid_map.insert(std::make_pair(elem->id(), global_index[cnt++] + pid_offset));
          }

        pid_offset += _n_active_elem_on_proc[pid];
      }

    // Create the unique mapping for all active elements independent of partitioning.
    {
      MeshBase::const_element_iterator       it  = mesh.active_elements_begin();
      const MeshBase::const_element_iterator end = mesh.active_elements_end();

      // By calling this on all processors, a unique range in [0,n_active_elem) is constructed.
      // Only the indices for the elements we pass in are returned in the array.
      MeshCommunication().find_global_indices (mesh.comm(),
                                               bbox, it, end,
                                               global_index);

      for (dof_id_type cnt=0; it != end; ++it)
        {
          const Elem * elem = *it;
          libmesh_assert (!global_index_map.count(elem->id()));
          libmesh_assert_less (cnt, global_index.size());
          libmesh_assert_less (global_index[cnt], n_active_elem);

          global_index_map.insert(std::make_pair(elem->id(), global_index[cnt++]));
        }
    }
    // really, shouldn't be close!
    libmesh_assert_less_equal (global_index_map.size(), n_active_elem);
    libmesh_assert_less_equal (_global_index_by_pid_map.size(), n_active_elem);

    // At this point the two maps should be the same size.  If they are not
    // then the number of active elements is not the same as the sum over all
    // processors of the number of active elements per processor, which means
    // there must be some unpartitioned objects out there.
    if (global_index_map.size() != _global_index_by_pid_map.size())
      libmesh_error_msg("ERROR: ParmetisPartitioner cannot handle unpartitioned objects!");
  }

  // Finally, we need to initialize the vertex (partition) weights and the initial subdomain
  // mapping.  The subdomain mapping will be independent of the processor mapping, and is
  // defined by a simple mapping of the global indices we just found.
  {
    std::vector<dof_id_type> subdomain_bounds(mesh.n_processors());

    const dof_id_type first_local_elem = _pmetis->vtxdist[mesh.processor_id()];

    for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
      {
        dof_id_type tgt_subdomain_size = 0;

        // Watch out for the case that n_subdomains < n_processors.
        if (pid < static_cast<unsigned int>(_pmetis->nparts))
          {
            tgt_subdomain_size =
              n_active_elem/std::min (cast_int<Parmetis::idx_t>(mesh.n_processors()),
                                      _pmetis->nparts);

            if (pid < n_active_elem%_pmetis->nparts)
              tgt_subdomain_size++;
          }
        if (pid == 0)
          subdomain_bounds[0] = tgt_subdomain_size;
        else
          subdomain_bounds[pid] = subdomain_bounds[pid-1] + tgt_subdomain_size;
      }

    libmesh_assert_equal_to (subdomain_bounds.back(), n_active_elem);

    MeshBase::const_element_iterator       elem_it  = mesh.active_local_elements_begin();
    const MeshBase::const_element_iterator elem_end = mesh.active_local_elements_end();

    for (; elem_it != elem_end; ++elem_it)
      {
        const Elem * elem = *elem_it;

        libmesh_assert (_global_index_by_pid_map.count(elem->id()));
        const dof_id_type global_index_by_pid =
          _global_index_by_pid_map[elem->id()];
        libmesh_assert_less (global_index_by_pid, n_active_elem);

        const dof_id_type local_index =
          global_index_by_pid - first_local_elem;

        libmesh_assert_less (local_index, n_active_local_elem);
        libmesh_assert_less (local_index, _pmetis->vwgt.size());

        // TODO:[BSK] maybe there is a better weight?
        _pmetis->vwgt[local_index] = elem->n_nodes();

        // Find the subdomain this element belongs in.
        libmesh_assert (global_index_map.count(elem->id()));
        const dof_id_type global_index =
          global_index_map[elem->id()];

        libmesh_assert_less (global_index, subdomain_bounds.back());

        const unsigned int subdomain_id =
          std::distance(subdomain_bounds.begin(),
                        std::lower_bound(subdomain_bounds.begin(),
                                         subdomain_bounds.end(),
                                         global_index));
        libmesh_assert_less (subdomain_id, static_cast<unsigned int>(_pmetis->nparts));
        libmesh_assert_less (local_index, _pmetis->part.size());

        _pmetis->part[local_index] = subdomain_id;
      }
  }
}
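// ------------------------------------------------------------
// Illustrative sketch (not part of libMesh; names are hypothetical): the
// vtxdist array built in initialize() above follows the standard ParMETIS
// vertex-distribution convention -- a prefix sum of the per-processor active
// element counts, so processor p owns graph vertices [vtxdist[p], vtxdist[p+1]).
// For example, counts {5, 3, 4} on 3 processors give vtxdist = {0, 5, 8, 12}.
// Kept in #if 0 so it never affects compilation.
#if 0
#include <cassert>
#include <numeric>
#include <vector>

std::vector<long> build_vtxdist (const std::vector<long> & n_active_elem_on_proc)
{
  std::vector<long> vtxdist(n_active_elem_on_proc.size() + 1, 0);

  // vtxdist[p+1] = vtxdist[p] + n_active_elem_on_proc[p]
  std::partial_sum(n_active_elem_on_proc.begin(), n_active_elem_on_proc.end(),
                   vtxdist.begin() + 1);
  return vtxdist;
}

void example_vtxdist ()
{
  const std::vector<long> vtxdist = build_vtxdist({5, 3, 4});

  assert (vtxdist.size() == 4);
  assert (vtxdist[0] == 0 && vtxdist[1] == 5 && vtxdist[2] == 8 && vtxdist[3] == 12);
}
#endif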
void CheckpointIO::read (const std::string & name)
{
  LOG_SCOPE("read()","CheckpointIO");

  MeshBase & mesh = MeshInput<MeshBase>::mesh();

  // Try to dynamic cast the mesh to see if it's a DistributedMesh object.
  // Note: Just using is_serial() is not good enough because the Mesh won't
  // have been prepared yet, which is when that flag gets set to false... sigh.
  bool parallel_mesh = dynamic_cast<DistributedMesh *>(&mesh);

  // If this is a serial mesh then we're only going to read it on processor 0 and broadcast it
  if (parallel_mesh || this->processor_id() == 0)
    {
      std::ostringstream file_name_stream;

      file_name_stream << name;

      if (parallel_mesh)
        file_name_stream << "-" << this->processor_id();

      {
        std::ifstream in (file_name_stream.str().c_str());

        if (!in.good())
          libmesh_error_msg("ERROR: cannot locate specified file:\n\t" << file_name_stream.str());
      }

      Xdr io (file_name_stream.str(), this->binary() ? DECODE : READ);

      // read the version
      io.data (_version);

      // Check if the mesh we're reading is the same as the one that was written
      {
        unsigned int parallel;
        io.data(parallel, "# parallel");

        if (static_cast<unsigned int>(parallel_mesh) != parallel)
          libmesh_error_msg("Attempted to utilize a checkpoint file with an incompatible mesh distribution!");
      }

      // If this is a parallel mesh then we need to check to ensure we're reading this on the same number of procs
      if (parallel_mesh)
        {
          largest_id_type n_procs;
          io.data(n_procs, "# n_procs");

          if (n_procs != this->n_processors())
            libmesh_error_msg("Attempted to utilize a checkpoint file on "
                              << this->n_processors()
                              << " processors but it was written using "
                              << n_procs << "!!");
        }

      // read subdomain names
      this->read_subdomain_names(io);

      // read the nodal locations
      this->read_nodes (io);

      // read connectivity
      this->read_connectivity (io);

      // read the boundary conditions
      this->read_bcs (io);

      // read the nodesets
      this->read_nodesets (io);

      io.close();
    }

  // If the mesh is serial then we only read it on processor 0, so we need to broadcast it
  if (!parallel_mesh)
    MeshCommunication().broadcast(mesh);
}
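// ------------------------------------------------------------
// Minimal usage sketch (not part of this file; the file name and setup are
// hypothetical): reading a checkpoint into a replicated mesh, assuming the
// usual LibMeshInit / ReplicatedMesh workflow.  Kept in #if 0 so it never
// affects compilation.
#if 0
#include "libmesh/libmesh.h"
#include "libmesh/replicated_mesh.h"
#include "libmesh/checkpoint_io.h"

int main (int argc, char ** argv)
{
  libMesh::LibMeshInit init(argc, argv);

  libMesh::ReplicatedMesh mesh(init.comm());

  // Read a binary checkpoint file; processor 0 reads and broadcasts,
  // per the serial-mesh branch of CheckpointIO::read() above.
  libMesh::CheckpointIO io(mesh, /*binary=*/true);
  io.read("checkpoint_mesh.cpr");

  // The read() above does not prepare the mesh, so do it here.
  mesh.prepare_for_use();

  return 0;
}
#endif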
void MetisPartitioner::partition_range(MeshBase & mesh,
                                       MeshBase::element_iterator beg,
                                       MeshBase::element_iterator end,
                                       unsigned int n_pieces)
{
  libmesh_assert_greater (n_pieces, 0);

  // We don't yet support distributed meshes with this Partitioner
  if (!mesh.is_serial())
    libmesh_not_implemented();

  // Check for an easy return
  if (n_pieces == 1)
    {
      this->single_partition_range (beg, end);
      return;
    }

  // What to do if the Metis library IS NOT present
#ifndef LIBMESH_HAVE_METIS

  libmesh_here();
  libMesh::err << "ERROR: The library has been built without" << std::endl
               << "Metis support.  Using a space-filling curve" << std::endl
               << "partitioner instead!" << std::endl;

  SFCPartitioner sfcp;
  sfcp.partition_range (mesh, beg, end, n_pieces);

  // What to do if the Metis library IS present
#else

  LOG_SCOPE("partition_range()", "MetisPartitioner");

  const dof_id_type n_range_elem = std::distance(beg, end);

  // Metis will only consider the elements in the range.
  // We need to map the range element ids into a
  // contiguous range.  Further, we want the unique range indexing to be
  // independent of the element ordering, otherwise a circular dependency
  // can result in which the partitioning depends on the ordering which
  // depends on the partitioning...
  vectormap<dof_id_type, dof_id_type> global_index_map;
  global_index_map.reserve (n_range_elem);

  {
    std::vector<dof_id_type> global_index;

    MeshCommunication().find_global_indices (mesh.comm(),
                                             MeshTools::create_bounding_box(mesh),
                                             beg, end, global_index);

    libmesh_assert_equal_to (global_index.size(), n_range_elem);

    MeshBase::element_iterator it = beg;
    for (std::size_t cnt=0; it != end; ++it)
      {
        const Elem * elem = *it;

        global_index_map.insert (std::make_pair(elem->id(), global_index[cnt++]));
      }
    libmesh_assert_equal_to (global_index_map.size(), n_range_elem);
  }

  // If we have boundary elements in this mesh, we want to account for
  // the connectivity between them and interior elements.  We can find
  // interior elements from boundary elements, but we need to build up
  // a lookup map to do the reverse.
  typedef std::unordered_multimap<const Elem *, const Elem *> map_type;
  map_type interior_to_boundary_map;

  {
    MeshBase::element_iterator it = beg;
    for (; it != end; ++it)
      {
        const Elem * elem = *it;

        // If we don't have an interior_parent then there's nothing
        // to look us up.
        if ((elem->dim() >= LIBMESH_DIM) ||
            !elem->interior_parent())
          continue;

        // get all relevant interior elements
        std::set<const Elem *> neighbor_set;
        elem->find_interior_neighbors(neighbor_set);

        std::set<const Elem *>::iterator n_it = neighbor_set.begin();
        for (; n_it != neighbor_set.end(); ++n_it)
          {
            // FIXME - non-const versions of the std::set<const Elem
            // *> returning methods would be nice
            Elem * neighbor = const_cast<Elem *>(*n_it);

#if defined(LIBMESH_HAVE_UNORDERED_MULTIMAP) ||         \
  defined(LIBMESH_HAVE_TR1_UNORDERED_MULTIMAP) ||       \
  defined(LIBMESH_HAVE_HASH_MULTIMAP) ||                \
  defined(LIBMESH_HAVE_EXT_HASH_MULTIMAP)
            interior_to_boundary_map.insert(std::make_pair(neighbor, elem));
#else
            interior_to_boundary_map.insert(interior_to_boundary_map.begin(),
                                            std::make_pair(neighbor, elem));
#endif
          }
      }
  }

  // Data structure that Metis will fill up on processor 0 and broadcast.
  std::vector<Metis::idx_t> part(n_range_elem);

  // Invoke METIS, but only on processor 0.
  // Then broadcast the resulting decomposition
  if (mesh.processor_id() == 0)
    {
      // Data structures and parameters needed only on processor 0 by Metis.
      // std::vector<Metis::idx_t> options(5);
      std::vector<Metis::idx_t> vwgt(n_range_elem);

      Metis::idx_t
        n = static_cast<Metis::idx_t>(n_range_elem),   // number of "nodes" (elements) in the graph
        // wgtflag = 2,                                // weights on vertices only, none on edges
        // numflag = 0,                                // C-style 0-based numbering
        nparts  = static_cast<Metis::idx_t>(n_pieces), // number of subdomains to create
        edgecut = 0;                                   // the number of edges cut by the resulting partition

      // Set the options
      // options[0] = 0; // use default options

      // build the graph
      METIS_CSR_Graph<Metis::idx_t> csr_graph;

      csr_graph.offsets.resize(n_range_elem + 1, 0);

      // Local scope for these
      {
        // build the graph in CSR format.  Note that
        // the edges in the graph will correspond to
        // face neighbors
#ifdef LIBMESH_ENABLE_AMR
        std::vector<const Elem *> neighbors_offspring;
#endif

#ifndef NDEBUG
        std::size_t graph_size=0;
#endif

        // (1) first pass - get the row sizes for each element by counting the number
        //     of face neighbors.  Also populate the vwgt array if necessary
        MeshBase::element_iterator it = beg;
        for (; it != end; ++it)
          {
            const Elem * elem = *it;

            const dof_id_type elem_global_index =
              global_index_map[elem->id()];

            libmesh_assert_less (elem_global_index, vwgt.size());

            // maybe there is a better weight?
            // The weight is used to define what a balanced graph is
            if (!_weights)
              vwgt[elem_global_index] = elem->n_nodes();
            else
              vwgt[elem_global_index] = static_cast<Metis::idx_t>((*_weights)[elem->id()]);

            unsigned int num_neighbors = 0;

            // Loop over the element's neighbors.  An element
            // adjacency corresponds to a face neighbor
            for (auto neighbor : elem->neighbor_ptr_range())
              {
                if (neighbor != libmesh_nullptr)
                  {
                    // If the neighbor is active, but is not in the
                    // range of elements being partitioned, treat it
                    // as a NULL neighbor.
                    if (neighbor->active() && !global_index_map.count(neighbor->id()))
                      continue;

                    // If the neighbor is active treat it
                    // as a connection
                    if (neighbor->active())
                      num_neighbors++;

#ifdef LIBMESH_ENABLE_AMR

                    // Otherwise we need to find all of the
                    // neighbor's children that are connected to
                    // us and add them
                    else
                      {
                        // The side of the neighbor to which
                        // we are connected
                        const unsigned int ns =
                          neighbor->which_neighbor_am_i (elem);
                        libmesh_assert_less (ns, neighbor->n_neighbors());

                        // Get all the active children (& grandchildren, etc...)
                        // of the neighbor.

                        // FIXME - this is the wrong thing, since we
                        // should be getting the active family tree on
                        // our side only.  But adding too many graph
                        // links may cause hanging nodes to tend to be
                        // on partition interiors, which would reduce
                        // communication overhead for constraint
                        // equations, so we'll leave it.
                        neighbor->active_family_tree (neighbors_offspring);

                        // Get all the neighbor's children that
                        // live on that side and are thus connected
                        // to us
                        for (std::size_t nc=0; nc<neighbors_offspring.size(); nc++)
                          {
                            const Elem * child =
                              neighbors_offspring[nc];

                            // Skip neighbor offspring which are not in the range of elements being partitioned.
                            if (!global_index_map.count(child->id()))
                              continue;

                            // This does not assume a level-1 mesh.
                            // Note that since children have sides numbered
                            // coincident with the parent then this is a sufficient test.
                            if (child->neighbor_ptr(ns) == elem)
                              {
                                libmesh_assert (child->active());
                                num_neighbors++;
                              }
                          }
                      }

#endif /* ifdef LIBMESH_ENABLE_AMR */

                  }
              }

            // Check for any interior neighbors
            if ((elem->dim() < LIBMESH_DIM) && elem->interior_parent())
              {
                // get all relevant interior elements
                std::set<const Elem *> neighbor_set;
                elem->find_interior_neighbors(neighbor_set);

                num_neighbors += neighbor_set.size();
              }

            // Check for any boundary neighbors
            typedef map_type::iterator map_it_type;
            std::pair<map_it_type, map_it_type>
              bounds = interior_to_boundary_map.equal_range(elem);
            num_neighbors += std::distance(bounds.first, bounds.second);

            csr_graph.prep_n_nonzeros(elem_global_index, num_neighbors);
#ifndef NDEBUG
            graph_size += num_neighbors;
#endif
          }

        csr_graph.prepare_for_use();

        // (2) second pass - fill the compressed adjacency array
        it = beg;

        for (; it != end; ++it)
          {
            const Elem * elem = *it;

            const dof_id_type elem_global_index =
              global_index_map[elem->id()];

            unsigned int connection=0;

            // Loop over the element's neighbors.  An element
            // adjacency corresponds to a face neighbor
            for (auto neighbor : elem->neighbor_ptr_range())
              {
                if (neighbor != libmesh_nullptr)
                  {
                    // If the neighbor is active, but is not in the
                    // range of elements being partitioned, treat it
                    // as a NULL neighbor.
                    if (neighbor->active() && !global_index_map.count(neighbor->id()))
                      continue;

                    // If the neighbor is active treat it
                    // as a connection
                    if (neighbor->active())
                      csr_graph(elem_global_index, connection++) = global_index_map[neighbor->id()];

#ifdef LIBMESH_ENABLE_AMR

                    // Otherwise we need to find all of the
                    // neighbor's children that are connected to
                    // us and add them
                    else
                      {
                        // The side of the neighbor to which
                        // we are connected
                        const unsigned int ns =
                          neighbor->which_neighbor_am_i (elem);
                        libmesh_assert_less (ns, neighbor->n_neighbors());

                        // Get all the active children (& grandchildren, etc...)
                        // of the neighbor.
                        neighbor->active_family_tree (neighbors_offspring);

                        // Get all the neighbor's children that
                        // live on that side and are thus connected
                        // to us
                        for (std::size_t nc=0; nc<neighbors_offspring.size(); nc++)
                          {
                            const Elem * child =
                              neighbors_offspring[nc];

                            // Skip neighbor offspring which are not in the range of elements being partitioned.
                            if (!global_index_map.count(child->id()))
                              continue;

                            // This does not assume a level-1 mesh.
                            // Note that since children have sides numbered
                            // coincident with the parent then this is a sufficient test.
                            if (child->neighbor_ptr(ns) == elem)
                              {
                                libmesh_assert (child->active());
                                csr_graph(elem_global_index, connection++) = global_index_map[child->id()];
                              }
                          }
                      }

#endif /* ifdef LIBMESH_ENABLE_AMR */

                  }
              }

            if ((elem->dim() < LIBMESH_DIM) && elem->interior_parent())
              {
                // get all relevant interior elements
                std::set<const Elem *> neighbor_set;
                elem->find_interior_neighbors(neighbor_set);

                std::set<const Elem *>::iterator n_it = neighbor_set.begin();
                for (; n_it != neighbor_set.end(); ++n_it)
                  {
                    const Elem * neighbor = *n_it;

                    // Not all interior neighbors are necessarily in
                    // the same Mesh (hence not in the global_index_map).
                    // This will be the case when partitioning a
                    // BoundaryMesh, whose elements all have
                    // interior_parents() that belong to some other
                    // Mesh.
                    const Elem * queried_elem = mesh.query_elem_ptr(neighbor->id());

                    // Compare the neighbor and the queried_elem
                    // pointers, make sure they are the same.
                    if (queried_elem && queried_elem == neighbor)
                      {
                        vectormap<dof_id_type, dof_id_type>::iterator global_index_map_it =
                          global_index_map.find(neighbor->id());

                        // If the interior_neighbor is in the Mesh but
                        // not in the global_index_map, we have other issues.
                        if (global_index_map_it == global_index_map.end())
                          libmesh_error_msg("Interior neighbor with id " << neighbor->id() << " not found in global_index_map.");

                        else
                          csr_graph(elem_global_index, connection++) = global_index_map_it->second;
                      }
                  }
              }

            // Check for any boundary neighbors
            for (const auto & pr : as_range(interior_to_boundary_map.equal_range(elem)))
              {
                const Elem * neighbor = pr.second;
                csr_graph(elem_global_index, connection++) =
                  global_index_map[neighbor->id()];
              }
          }

        // We create a non-empty vals for a disconnected graph, to
        // work around a segfault from METIS.
        libmesh_assert_equal_to (csr_graph.vals.size(),
                                 std::max(graph_size, std::size_t(1)));
      } // done building the graph

      Metis::idx_t ncon = 1;

      // Select which type of partitioning to create

      // Use recursive if the number of partitions is less than or equal to 8
      if (n_pieces <= 8)
        Metis::METIS_PartGraphRecursive(&n,
                                        &ncon,
                                        &csr_graph.offsets[0],
                                        &csr_graph.vals[0],
                                        &vwgt[0],
                                        libmesh_nullptr,
                                        libmesh_nullptr,
                                        &nparts,
                                        libmesh_nullptr,
                                        libmesh_nullptr,
                                        libmesh_nullptr,
                                        &edgecut,
                                        &part[0]);

      // Otherwise use kway
      else
        Metis::METIS_PartGraphKway(&n,
                                   &ncon,
                                   &csr_graph.offsets[0],
                                   &csr_graph.vals[0],
                                   &vwgt[0],
                                   libmesh_nullptr,
                                   libmesh_nullptr,
                                   &nparts,
                                   libmesh_nullptr,
                                   libmesh_nullptr,
                                   libmesh_nullptr,
                                   &edgecut,
                                   &part[0]);

    } // end processor 0 part

  // Broadcast the resulting partition
  mesh.comm().broadcast(part);

  // Assign the returned processor ids.  The part array contains
  // the processor id for each active element, but in terms of
  // the contiguous indexing we defined above
  {
    MeshBase::element_iterator it = beg;
    for (; it!=end; ++it)
      {
        Elem * elem = *it;

        libmesh_assert (global_index_map.count(elem->id()));

        const dof_id_type elem_global_index =
          global_index_map[elem->id()];

        libmesh_assert_less (elem_global_index, part.size());

        const processor_id_type elem_procid =
          static_cast<processor_id_type>(part[elem_global_index]);

        elem->processor_id() = elem_procid;
      }
  }
#endif
}
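// ------------------------------------------------------------
// Illustrative sketch (not part of libMesh): the CSR ("compressed sparse
// row") layout the code above hands to METIS.  For a 1D chain of four
// elements 0-1-2-3, where each element is adjacent to its face neighbors,
// the neighbors of element e are vals[offsets[e]] .. vals[offsets[e+1]-1].
// Kept in #if 0 so it never affects compilation.
#if 0
#include <cassert>
#include <vector>

void example_csr_layout ()
{
  const std::vector<int> offsets = {0, 1, 3, 5, 6};    // xadj:   row pointers
  const std::vector<int> vals    = {1, 0, 2, 1, 3, 2}; // adjncy: concatenated neighbor lists

  // element 2 has two face neighbors: elements 1 and 3
  assert (offsets[3] - offsets[2] == 2);
  assert (vals[offsets[2]]     == 1);
  assert (vals[offsets[2] + 1] == 3);
}
#endif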
// ------------------------------------------------------------
// MetisPartitioner implementation
void MetisPartitioner::_do_partition (MeshBase & mesh,
                                      const unsigned int n_pieces)
{
  libmesh_assert_greater (n_pieces, 0);
  libmesh_assert (mesh.is_serial());

  // Check for an easy return
  if (n_pieces == 1)
    {
      this->single_partition (mesh);
      return;
    }

  // What to do if the Metis library IS NOT present
#ifndef LIBMESH_HAVE_METIS

  libmesh_here();
  libMesh::err << "ERROR: The library has been built without" << std::endl
               << "Metis support.  Using a space-filling curve" << std::endl
               << "partitioner instead!" << std::endl;

  SFCPartitioner sfcp;
  sfcp.partition (mesh, n_pieces);

  // What to do if the Metis library IS present
#else

  START_LOG("partition()", "MetisPartitioner");

  const dof_id_type n_active_elem = mesh.n_active_elem();

  // build the graph
  // std::vector<int> options(5);
  std::vector<int> vwgt(n_active_elem);
  std::vector<int> part(n_active_elem);

  int
    n = static_cast<int>(n_active_elem),  // number of "nodes" (elements)
                                          // in the graph
    // wgtflag = 2,                       // weights on vertices only,
    //                                    // none on edges
    // numflag = 0,                       // C-style 0-based numbering
    nparts  = static_cast<int>(n_pieces), // number of subdomains to create
    edgecut = 0;                          // the number of edges cut by the
                                          // resulting partition

  // Set the options
  // options[0] = 0; // use default options

  // Metis will only consider the active elements.
  // We need to map the active element ids into a
  // contiguous range.  Further, we want the unique range indexing to be
  // independent of the element ordering, otherwise a circular dependency
  // can result in which the partitioning depends on the ordering which
  // depends on the partitioning...
  std::map<const Elem *, dof_id_type> global_index_map;
  {
    std::vector<dof_id_type> global_index;

    MeshBase::element_iterator       it  = mesh.active_elements_begin();
    const MeshBase::element_iterator end = mesh.active_elements_end();

    MeshCommunication().find_global_indices (MeshTools::bounding_box(mesh),
                                             it, end, global_index);

    libmesh_assert_equal_to (global_index.size(), n_active_elem);

    for (std::size_t cnt=0; it != end; ++it)
      {
        const Elem * elem = *it;
        libmesh_assert (!global_index_map.count(elem));

        global_index_map[elem] = global_index[cnt++];
      }
    libmesh_assert_equal_to (global_index_map.size(), n_active_elem);
  }

  // build the graph in CSR format.  Note that
  // the edges in the graph will correspond to
  // face neighbors
  std::vector<int> xadj, adjncy;
  {
    std::vector<const Elem *> neighbors_offspring;

    MeshBase::element_iterator       elem_it  = mesh.active_elements_begin();
    const MeshBase::element_iterator elem_end = mesh.active_elements_end();

    // This will be exact when there is no refinement and all the
    // elements are of the same type.
    std::size_t graph_size=0;
    std::vector<std::vector<dof_id_type> > graph(n_active_elem);

    for (; elem_it != elem_end; ++elem_it)
      {
        const Elem * elem = *elem_it;

        libmesh_assert (global_index_map.count(elem));

        const dof_id_type elem_global_index =
          global_index_map[elem];

        libmesh_assert_less (elem_global_index, vwgt.size());
        libmesh_assert_less (elem_global_index, graph.size());

        // maybe there is a better weight?
        // The weight is used to define what a balanced graph is
        if (!_weights)
          vwgt[elem_global_index] = elem->n_nodes();
        else
          vwgt[elem_global_index] = static_cast<int>((*_weights)[elem->id()]);

        // Loop over the element's neighbors.
        // An element adjacency corresponds to a face neighbor
        for (unsigned int ms=0; ms<elem->n_neighbors(); ms++)
          {
            const Elem * neighbor = elem->neighbor(ms);

            if (neighbor != NULL)
              {
                // If the neighbor is active treat it
                // as a connection
                if (neighbor->active())
                  {
                    libmesh_assert (global_index_map.count(neighbor));

                    const dof_id_type neighbor_global_index =
                      global_index_map[neighbor];

                    graph[elem_global_index].push_back(neighbor_global_index);
                    graph_size++;
                  }

#ifdef LIBMESH_ENABLE_AMR

                // Otherwise we need to find all of the
                // neighbor's children that are connected to
                // us and add them
                else
                  {
                    // The side of the neighbor to which
                    // we are connected
                    const unsigned int ns =
                      neighbor->which_neighbor_am_i (elem);
                    libmesh_assert_less (ns, neighbor->n_neighbors());

                    // Get all the active children (& grandchildren, etc...)
                    // of the neighbor.
                    neighbor->active_family_tree (neighbors_offspring);

                    // Get all the neighbor's children that
                    // live on that side and are thus connected
                    // to us
                    for (unsigned int nc=0; nc<neighbors_offspring.size(); nc++)
                      {
                        const Elem * child = neighbors_offspring[nc];

                        // This does not assume a level-1 mesh.
                        // Note that since children have sides numbered
                        // coincident with the parent then this is a sufficient test.
                        if (child->neighbor(ns) == elem)
                          {
                            libmesh_assert (child->active());
                            libmesh_assert (global_index_map.count(child));

                            const dof_id_type child_global_index =
                              global_index_map[child];

                            graph[elem_global_index].push_back(child_global_index);
                            graph_size++;
                          }
                      }
                  }

#endif /* ifdef LIBMESH_ENABLE_AMR */

              }
          }
      }

    // Convert the graph into the format Metis wants
    xadj.reserve(n_active_elem+1);
    adjncy.reserve(graph_size);

    for (std::size_t r=0; r<graph.size(); r++)
      {
        xadj.push_back(adjncy.size());
        std::vector<dof_id_type> graph_row; // build this empty
        graph_row.swap(graph[r]); // this will deallocate at the end of scope
        adjncy.insert(adjncy.end(),
                      graph_row.begin(),
                      graph_row.end());
      }

    // The end of the adjacency array for the last elem
    xadj.push_back(adjncy.size());

    libmesh_assert_equal_to (adjncy.size(), graph_size);
    libmesh_assert_equal_to (xadj.size(), n_active_elem+1);
  } // done building the graph

  if (adjncy.empty())
    adjncy.push_back(0);

  int ncon = 1;

  // Select which type of partitioning to create

  // Use recursive if the number of partitions is less than or equal to 8
  if (n_pieces <= 8)
    Metis::METIS_PartGraphRecursive(&n, &ncon, &xadj[0], &adjncy[0], &vwgt[0], NULL,
                                    NULL, &nparts, NULL, NULL, NULL, &edgecut, &part[0]);

  // Otherwise use kway
  else
    Metis::METIS_PartGraphKway(&n, &ncon, &xadj[0], &adjncy[0], &vwgt[0], NULL,
                               NULL, &nparts, NULL, NULL, NULL, &edgecut, &part[0]);

  // Assign the returned processor ids.  The part array contains
  // the processor id for each active element, but in terms of
  // the contiguous indexing we defined above
  {
    MeshBase::element_iterator       it  = mesh.active_elements_begin();
    const MeshBase::element_iterator end = mesh.active_elements_end();

    for (; it!=end; ++it)
      {
        Elem * elem = *it;

        libmesh_assert (global_index_map.count(elem));

        const dof_id_type elem_global_index =
          global_index_map[elem];

        libmesh_assert_less (elem_global_index, part.size());

        const processor_id_type elem_procid =
          static_cast<processor_id_type>(part[elem_global_index]);

        elem->processor_id() = elem_procid;
      }
  }

  STOP_LOG("partition()", "MetisPartitioner");
#endif
}
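// ------------------------------------------------------------
// Illustrative sketch (not part of libMesh): the adjacency-list-to-CSR
// conversion performed at the end of the graph-building block above.  Each
// row's offset is the running size of adjncy before that row is appended,
// and the final xadj entry is the total number of graph edges.  Kept in
// #if 0 so it never affects compilation.
#if 0
#include <cassert>
#include <vector>

void example_graph_to_csr ()
{
  // adjacency lists for the 1D chain 0-1-2-3 of face neighbors
  std::vector<std::vector<int> > graph = {{1}, {0, 2}, {1, 3}, {2}};

  std::vector<int> xadj, adjncy;
  for (std::size_t r=0; r<graph.size(); r++)
    {
      xadj.push_back(static_cast<int>(adjncy.size()));
      adjncy.insert(adjncy.end(), graph[r].begin(), graph[r].end());
    }
  xadj.push_back(static_cast<int>(adjncy.size()));

  assert ((xadj   == std::vector<int>{0, 1, 3, 5, 6}));
  assert ((adjncy == std::vector<int>{1, 0, 2, 1, 3, 2}));
}
#endif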