Example #1
template <typename T>
void LocationMap<T>::init(MeshBase & mesh)
{
  // This function must be run on all processors at once
  // for non-serial meshes
  if (!mesh.is_serial())
    libmesh_parallel_only(mesh.comm());

  START_LOG("init()", "LocationMap");

  // Clear the old map
  _map.clear();

  // Cache a bounding box
  _lower_bound.clear();
  _lower_bound.resize(LIBMESH_DIM, std::numeric_limits<Real>::max());
  _upper_bound.clear();
  _upper_bound.resize(LIBMESH_DIM, -std::numeric_limits<Real>::max());

  MeshBase::node_iterator       it  = mesh.nodes_begin();
  const MeshBase::node_iterator end = mesh.nodes_end();

  for (; it != end; ++it)
    {
      Node * node = *it;

      for (unsigned int i=0; i != LIBMESH_DIM; ++i)
        {
          // Expand the bounding box if necessary
          _lower_bound[i] = std::min(_lower_bound[i],
                                     (*node)(i));
          _upper_bound[i] = std::max(_upper_bound[i],
                                     (*node)(i));
        }
    }

  // On a parallel mesh we might not yet have a full bounding box
  if (!mesh.is_serial())
    {
      mesh.comm().min(_lower_bound);
      mesh.comm().max(_upper_bound);
    }

  this->fill(mesh);

  STOP_LOG("init()", "LocationMap");
}
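For orientation, here is a minimal usage sketch of the class above. The LocationMap<Node> instantiation and the find() lookup mirror how libMesh uses this map for node location queries, but the exact calls are illustrative assumptions, not part of the example:

#include "libmesh/location_maps.h"
#include "libmesh/mesh_base.h"
#include "libmesh/node.h"
#include "libmesh/point.h"

using namespace libMesh;

void lookup_node_example (MeshBase & mesh)
{
  LocationMap<Node> node_map;
  node_map.init(mesh);              // builds the bounding box, then fill()s the map

  const Point p(0.5, 0.5, 0.);
  Node * node = node_map.find(p);   // assumed lookup interface; may return no node
  if (node)
    libMesh::out << "node " << node->id() << " found near " << p << std::endl;
}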
Example #2
void Partitioner::partition (MeshBase & mesh,
                             const unsigned int n)
{
  libmesh_parallel_only(mesh.comm());

  // BSK - temporary fix while redistribution is integrated 6/26/2008
  // Uncomment this to not repartition in parallel
  //   if (!mesh.is_serial())
  //     return;

  // we cannot partition into more pieces than we have
  // active elements!
  const unsigned int n_parts =
    static_cast<unsigned int>
    (std::min(mesh.n_active_elem(), static_cast<dof_id_type>(n)));

  // Set the number of partitions in the mesh
  mesh.set_n_partitions() = n_parts;

  if (n_parts == 1)
    {
      this->single_partition (mesh);
      return;
    }

  // First assign a temporary partitioning to any unpartitioned elements
  Partitioner::partition_unpartitioned_elements(mesh, n_parts);

  // Call the partitioning function
  this->_do_partition(mesh,n_parts);

  // Set the parent's processor ids
  Partitioner::set_parent_processor_ids(mesh);

  // Redistribute elements if necessary, before setting node processor
  // ids, to make sure those will be set consistently
  mesh.redistribute();

#ifdef DEBUG
  MeshTools::libmesh_assert_valid_remote_elems(mesh);

  // Messed up elem processor_id()s can leave us without the child
  // elements we need to restrict vectors on a distributed mesh
  MeshTools::libmesh_assert_valid_procids<Elem>(mesh);
#endif

  // Set the node's processor ids
  Partitioner::set_node_processor_ids(mesh);

#ifdef DEBUG
  MeshTools::libmesh_assert_valid_procids<Elem>(mesh);
#endif

  // Give derived Mesh classes a chance to update any cached data to
  // reflect the new partitioning
  mesh.update_post_partitioning();
}
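A short sketch of how this entry point is typically reached, assuming a concrete subclass such as MetisPartitioner (any Partitioner implementation would do):

#include "libmesh/mesh_base.h"
#include "libmesh/metis_partitioner.h"

using namespace libMesh;

void repartition_in_four (MeshBase & mesh)
{
  MetisPartitioner partitioner;
  partitioner.partition(mesh, 4);   // n is clamped to n_active_elem() internally
}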
Example #3
MeshSerializer::MeshSerializer(MeshBase & mesh, bool need_serial) :
  _mesh(mesh),
  reparallelize(false)
{
  libmesh_parallel_only(mesh.comm());
  if (need_serial && !_mesh.is_serial()) {
    reparallelize = true;
    _mesh.allgather();
  }
}
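The constructor above is half of an RAII pair: it gathers a distributed mesh whenever a serial view is required, and the reparallelize flag suggests the destructor scatters it again. A minimal usage sketch under that assumption:

#include "libmesh/mesh_base.h"
#include "libmesh/mesh_serializer.h"

using namespace libMesh;

void serial_only_operation (MeshBase & mesh)
{
  {
    MeshSerializer serialize(mesh, /*need_serial=*/true);
    // ... work that assumes mesh.is_serial() ...
  } // if the mesh had to be gathered, the destructor redistributes it here
}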
Example #4
void integrate_function (const MeshBase &mesh)
{
#if defined(LIBMESH_HAVE_TRIANGLE) && defined(LIBMESH_HAVE_TETGEN)
    MeshBase::const_element_iterator       el     = mesh.active_local_elements_begin();
    const MeshBase::const_element_iterator end_el = mesh.active_local_elements_end();

    std::vector<Real> vertex_distance;

    QComposite<QGauss> qrule (mesh.mesh_dimension(), FIRST);
    //QGauss qrule (mesh.mesh_dimension(), FIRST);

    UniquePtr<FEBase> fe (FEBase::build (mesh.mesh_dimension(), FEType (FIRST, LAGRANGE)));

    Real int_val=0.;

    const std::vector<Point> &q_points = fe->get_xyz();
    const std::vector<Real>  &JxW      = fe->get_JxW();

    for (; el!=end_el; ++el)
    {
        const Elem *elem = *el;

        vertex_distance.clear();

        for (unsigned int v=0; v<elem->n_vertices(); v++)
            vertex_distance.push_back (distance(elem->point(v)));

        qrule.init (*elem, vertex_distance);

        fe->reinit (elem,
                    &(qrule.get_points()),
                    &(qrule.get_weights()));


        // TODO:  would it be valuable to have the composite quadrature rule sort
        // from smallest to largest JxW value to help prevent
        // ... large + small + large + large + small ...
        // type truncation errors?
        for (unsigned int qp=0; qp<q_points.size(); qp++)
            int_val += JxW[qp] * integrand(q_points[qp]);
    }

    mesh.comm().sum (int_val);

    std::cout  << "\n***********************************\n"
               << " int_val   = " << int_val << std::endl
               << " exact_val = " <<  1*(2*2 - radius*radius*pi) + 10.*(radius*radius*pi)
               << "\n***********************************\n"
               << std::endl;
#else
    libmesh_ignore(mesh);
#endif
}
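This example calls file-scope helpers (distance(), integrand(), and the constant radius) that the extract omits; pi may simply be libMesh::pi. Judging from the printed exact_val expression, the integrand is 1 outside a circle of radius radius and 10 inside it, over a domain of area 4. A plausible, hypothetical reconstruction:

#include "libmesh/libmesh_common.h"
#include "libmesh/point.h"

using namespace libMesh;

// Hypothetical reconstruction: a circle of radius 0.5 centered at the
// origin, inside a domain of area 4 (e.g. the square [-1,1]^2).
static const Real radius = 0.5;

// Signed distance to the circle: negative inside, positive outside.
Real distance (const Point & p)
{
  return p.norm() - radius;
}

// Piecewise-constant integrand: 10 inside the circle, 1 outside,
// matching the exact_val expression printed by the example.
Real integrand (const Point & p)
{
  return (distance(p) < 0.) ? 10. : 1.;
}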
Example #5
template <typename T>
void LocationMap<T>::init(MeshBase & mesh)
{
  // This function must be run on all processors at once
  // for non-serial meshes
  if (!mesh.is_serial())
    libmesh_parallel_only(mesh.comm());

  LOG_SCOPE("init()", "LocationMap");

  // Clear the old map
  _map.clear();

  // Cache a bounding box
  _lower_bound.clear();
  _lower_bound.resize(LIBMESH_DIM, std::numeric_limits<Real>::max());
  _upper_bound.clear();
  _upper_bound.resize(LIBMESH_DIM, -std::numeric_limits<Real>::max());

  for (auto & node : mesh.node_ptr_range())
    for (unsigned int i=0; i != LIBMESH_DIM; ++i)
      {
        // Expand the bounding box if necessary
        _lower_bound[i] = std::min(_lower_bound[i],
                                   (*node)(i));
        _upper_bound[i] = std::max(_upper_bound[i],
                                   (*node)(i));
      }

  // On a parallel mesh we might not yet have a full bounding box
  if (!mesh.is_serial())
    {
      mesh.comm().min(_lower_bound);
      mesh.comm().max(_upper_bound);
    }

  this->fill(mesh);
}
Example #6
//--------------------------------------------------------------------------
void TopologyMap::init(MeshBase& mesh)
{
  // This function must be run on all processors at once
  // for non-serial meshes
  if (!mesh.is_serial())
    libmesh_parallel_only(mesh.comm());

  START_LOG("init()", "TopologyMap");

  // Clear the old map
  _map.clear();

  this->fill(mesh);

  STOP_LOG("init()", "TopologyMap");
}
Example #7
void Partitioner::set_node_processor_ids(MeshBase & mesh)
{
  LOG_SCOPE("set_node_processor_ids()","Partitioner");

  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  // If we have any unpartitioned elements at this
  // stage there is a problem
  libmesh_assert (MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
                                    mesh.unpartitioned_elements_end()) == 0);


  //   const dof_id_type orig_n_local_nodes = mesh.n_local_nodes();

  //   libMesh::err << "[" << mesh.processor_id() << "]: orig_n_local_nodes="
  //     << orig_n_local_nodes << std::endl;

  // Build up request sets.  Each node is currently owned by a processor because
  // it is connected to an element owned by that processor.  However, during the
  // repartitioning phase that element may have been assigned a new processor id, but
  // it is still resident on the original processor.  We need to know where to look
  // for new ids before assigning new ids, otherwise we may be asking the wrong processors
  // for the wrong information.
  //
  // The only remaining issue is what to do with unpartitioned nodes.  Since they are required
  // to live on all processors we can simply rely on ourselves to number them properly.
  std::vector<std::vector<dof_id_type> >
    requested_node_ids(mesh.n_processors());

  // Loop over all the nodes, count the ones on each processor.  We can skip ourself
  std::vector<dof_id_type> ghost_nodes_from_proc(mesh.n_processors(), 0);

  MeshBase::node_iterator       node_it  = mesh.nodes_begin();
  const MeshBase::node_iterator node_end = mesh.nodes_end();

  for (; node_it != node_end; ++node_it)
    {
      Node * node = *node_it;
      libmesh_assert(node);
      const processor_id_type current_pid = node->processor_id();
      if (current_pid != mesh.processor_id() &&
          current_pid != DofObject::invalid_processor_id)
        {
          libmesh_assert_less (current_pid, ghost_nodes_from_proc.size());
          ghost_nodes_from_proc[current_pid]++;
        }
    }

  // We know how many objects live on each processor, so reserve()
  // space for each.
  for (processor_id_type pid=0; pid != mesh.n_processors(); ++pid)
    requested_node_ids[pid].reserve(ghost_nodes_from_proc[pid]);

  // We need to get the new pid for each node from the processor
  // which *currently* owns the node.  We can safely skip ourself
  for (node_it = mesh.nodes_begin(); node_it != node_end; ++node_it)
    {
      Node * node = *node_it;
      libmesh_assert(node);
      const processor_id_type current_pid = node->processor_id();
      if (current_pid != mesh.processor_id() &&
          current_pid != DofObject::invalid_processor_id)
        {
          libmesh_assert_less (current_pid, requested_node_ids.size());
          libmesh_assert_less (requested_node_ids[current_pid].size(),
                               ghost_nodes_from_proc[current_pid]);
          requested_node_ids[current_pid].push_back(node->id());
        }

      // Unset any previously-set node processor ids
      node->invalidate_processor_id();
    }

  // Loop over all the active elements
  MeshBase::element_iterator       elem_it  = mesh.active_elements_begin();
  const MeshBase::element_iterator elem_end = mesh.active_elements_end();

  for ( ; elem_it != elem_end; ++elem_it)
    {
      Elem * elem = *elem_it;
      libmesh_assert(elem);

      libmesh_assert_not_equal_to (elem->processor_id(), DofObject::invalid_processor_id);

      // For each node, set the processor ID to the min of
      // its current value and this Element's processor id.
      //
      // TODO: we would probably get better parallel partitioning if
      // we did something like "min for even numbered nodes, max for
      // odd numbered".  We'd need to be careful about how that would
      // affect solution ordering for I/O, though.
      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        elem->node_ptr(n)->processor_id() = std::min(elem->node_ptr(n)->processor_id(),
                                                     elem->processor_id());
    }

  // And loop over the subactive elements, but don't reassign
  // nodes that are already active on another processor.
  MeshBase::element_iterator       sub_it  = mesh.subactive_elements_begin();
  const MeshBase::element_iterator sub_end = mesh.subactive_elements_end();

  for ( ; sub_it != sub_end; ++sub_it)
    {
      Elem * elem = *sub_it;
      libmesh_assert(elem);

      libmesh_assert_not_equal_to (elem->processor_id(), DofObject::invalid_processor_id);

      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        if (elem->node_ptr(n)->processor_id() == DofObject::invalid_processor_id)
          elem->node_ptr(n)->processor_id() = elem->processor_id();
    }

  // Same for the inactive elements -- we will have already gotten most of these
  // nodes, *except* for the case of a parent with a subset of children which are
  // ghost elements.  In that case some of the parent nodes will not have been
  // properly handled yet
  MeshBase::element_iterator       not_it  = mesh.not_active_elements_begin();
  const MeshBase::element_iterator not_end = mesh.not_active_elements_end();

  for ( ; not_it != not_end; ++not_it)
    {
      Elem * elem = *not_it;
      libmesh_assert(elem);

      libmesh_assert_not_equal_to (elem->processor_id(), DofObject::invalid_processor_id);

      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        if (elem->node_ptr(n)->processor_id() == DofObject::invalid_processor_id)
          elem->node_ptr(n)->processor_id() = elem->processor_id();
    }

  // We can't assert that all nodes are connected to elements, because
  // a DistributedMesh with NodeConstraints might have pulled in some
  // remote nodes solely for evaluating those constraints.
  // MeshTools::libmesh_assert_connected_nodes(mesh);

  // For such nodes, we'll do a sanity check later when making sure
  // that we successfully reset their processor ids to something
  // valid.

  // Next set node ids from other processors, excluding self
  for (processor_id_type p=1; p != mesh.n_processors(); ++p)
    {
      // Trade my requests with processor procup and procdown
      processor_id_type procup = cast_int<processor_id_type>
        ((mesh.processor_id() + p) % mesh.n_processors());
      processor_id_type procdown = cast_int<processor_id_type>
        ((mesh.n_processors() + mesh.processor_id() - p) %
         mesh.n_processors());
      std::vector<dof_id_type> request_to_fill;
      mesh.comm().send_receive(procup, requested_node_ids[procup],
                               procdown, request_to_fill);

      // Fill those requests in-place
      for (std::size_t i=0; i != request_to_fill.size(); ++i)
        {
          Node & node = mesh.node_ref(request_to_fill[i]);
          const processor_id_type new_pid = node.processor_id();

          // We may have an invalid processor_id() on nodes that have been
          // "detatched" from coarsened-away elements but that have not yet
          // themselves been removed.
          // libmesh_assert_not_equal_to (new_pid, DofObject::invalid_processor_id);
          // libmesh_assert_less (new_pid, mesh.n_partitions());
          // (That would be the correct test -- the number of
          // partitions may not equal the number of processors.)
          request_to_fill[i] = new_pid;
        }

      // Trade back the results
      std::vector<dof_id_type> filled_request;
      mesh.comm().send_receive(procdown, request_to_fill,
                               procup,   filled_request);
      libmesh_assert_equal_to (filled_request.size(), requested_node_ids[procup].size());

      // And copy the id changes we've now been informed of
      for (std::size_t i=0; i != filled_request.size(); ++i)
        {
          Node & node = mesh.node_ref(requested_node_ids[procup][i]);

          // this is the correct test -- the number of partitions may
          // not equal the number of processors

          // But: we may have an invalid processor_id() on nodes that
          // have been "detatched" from coarsened-away elements but
          // that have not yet themselves been removed.
          // libmesh_assert_less (filled_request[i], mesh.n_partitions());

          node.processor_id(cast_int<processor_id_type>(filled_request[i]));
        }
    }

#ifdef DEBUG
  MeshTools::libmesh_assert_valid_procids<Node>(mesh);
#endif
}
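The procup/procdown loop above is a standard ring exchange: in round p every rank sends to the rank p ahead of it and receives from the rank p behind it, so all pairs are covered without deadlock. A stripped-down sketch of just that pattern, with the request payload as a stand-in:

#include "libmesh/libmesh_common.h"
#include "libmesh/parallel.h"

#include <vector>

using namespace libMesh;

// Assumes requests.size() == comm.size(), one outgoing vector per rank.
void ring_exchange (const Parallel::Communicator & comm,
                    std::vector<std::vector<dof_id_type>> & requests)
{
  for (processor_id_type p = 1; p != comm.size(); ++p)
    {
      const processor_id_type procup = cast_int<processor_id_type>
        ((comm.rank() + p) % comm.size());
      const processor_id_type procdown = cast_int<processor_id_type>
        ((comm.size() + comm.rank() - p) % comm.size());

      std::vector<dof_id_type> incoming;
      comm.send_receive(procup, requests[procup],
                        procdown, incoming);

      // ... answer 'incoming' in place, then send_receive the results
      // back along the reverse edge (to procdown, from procup) ...
    }
}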
Example #8
void Partitioner::set_parent_processor_ids(MeshBase & mesh)
{
  // Ignore the parameter when !LIBMESH_ENABLE_AMR
  libmesh_ignore(mesh);

  LOG_SCOPE("set_parent_processor_ids()", "Partitioner");

#ifdef LIBMESH_ENABLE_AMR

  // If the mesh is serial we have access to all the elements,
  // in particular all the active ones.  We can therefore set
  // the parent processor ids indirectly through their children, and
  // set the subactive processor ids while examining their active
  // ancestors.
  // By convention a parent is assigned to the minimum processor
  // of all its children, and a subactive is assigned to the processor
  // of its active ancestor.
  if (mesh.is_serial())
    {
      // Loop over all the active elements in the mesh
      MeshBase::element_iterator       it  = mesh.active_elements_begin();
      const MeshBase::element_iterator end = mesh.active_elements_end();

      for ( ; it!=end; ++it)
        {
          Elem * child  = *it;

          // First set descendants

          std::vector<const Elem *> subactive_family;
          child->total_family_tree(subactive_family);
          for (unsigned int i = 0; i != subactive_family.size(); ++i)
            const_cast<Elem *>(subactive_family[i])->processor_id() = child->processor_id();

          // Then set ancestors

          Elem * parent = child->parent();

          while (parent)
            {
              // invalidate the parent id, otherwise the min below
              // will not work if the current parent id is less
              // than all the children!
              parent->invalidate_processor_id();

              for (unsigned int c=0; c<parent->n_children(); c++)
                {
                  child = parent->child_ptr(c);
                  libmesh_assert(child);
                  libmesh_assert(!child->is_remote());
                  libmesh_assert_not_equal_to (child->processor_id(), DofObject::invalid_processor_id);
                  parent->processor_id() = std::min(parent->processor_id(),
                                                    child->processor_id());
                }
              parent = parent->parent();
            }
        }
    }

  // When the mesh is parallel we cannot guarantee that parents have access to
  // all their children.
  else
    {
      // Setting subactive processor ids is easy: we can guarantee
      // that children have access to all their parents.

      // Loop over all the active elements in the mesh
      MeshBase::element_iterator       it  = mesh.active_elements_begin();
      const MeshBase::element_iterator end = mesh.active_elements_end();

      for ( ; it!=end; ++it)
        {
          Elem * child  = *it;

          std::vector<const Elem *> subactive_family;
          child->total_family_tree(subactive_family);
          for (unsigned int i = 0; i != subactive_family.size(); ++i)
            const_cast<Elem *>(subactive_family[i])->processor_id() = child->processor_id();
        }

      // When the mesh is parallel we cannot guarantee that parents have access to
      // all their children.

      // We will use a brute-force approach here.  Each processor finds its parent
      // elements and sets the parent pid to the minimum of its
      // semilocal descendants.
      // A global reduction is then performed to make sure the true minimum is found.
      // As noted, this is required because we cannot guarantee that a parent has
      // access to all its children on any single processor.
      libmesh_parallel_only(mesh.comm());
      libmesh_assert(MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
                                       mesh.unpartitioned_elements_end()) == 0);

      const dof_id_type max_elem_id = mesh.max_elem_id();

      std::vector<processor_id_type>
        parent_processor_ids (std::min(communication_blocksize,
                                       max_elem_id));

      for (dof_id_type blk=0, last_elem_id=0; last_elem_id<max_elem_id; blk++)
        {
          last_elem_id =
            std::min(static_cast<dof_id_type>((blk+1)*communication_blocksize),
                     max_elem_id);
          const dof_id_type first_elem_id = blk*communication_blocksize;

          std::fill (parent_processor_ids.begin(),
                     parent_processor_ids.end(),
                     DofObject::invalid_processor_id);

          // first build up local contributions to parent_processor_ids
          MeshBase::element_iterator       not_it  = mesh.ancestor_elements_begin();
          const MeshBase::element_iterator not_end = mesh.ancestor_elements_end();

          bool have_parent_in_block = false;

          for ( ; not_it != not_end; ++not_it)
            {
              Elem * parent = *not_it;

              const dof_id_type parent_idx = parent->id();
              libmesh_assert_less (parent_idx, max_elem_id);

              if ((parent_idx >= first_elem_id) &&
                  (parent_idx <  last_elem_id))
                {
                  have_parent_in_block = true;
                  processor_id_type parent_pid = DofObject::invalid_processor_id;

                  std::vector<const Elem *> active_family;
                  parent->active_family_tree(active_family);
                  for (unsigned int i = 0; i != active_family.size(); ++i)
                    parent_pid = std::min (parent_pid, active_family[i]->processor_id());

                  const dof_id_type packed_idx = parent_idx - first_elem_id;
                  libmesh_assert_less (packed_idx, parent_processor_ids.size());

                  parent_processor_ids[packed_idx] = parent_pid;
                }
            }

          // then find the global minimum
          mesh.comm().min (parent_processor_ids);

          // and assign the ids, if we have a parent in this block.
          if (have_parent_in_block)
            for (not_it = mesh.ancestor_elements_begin();
                 not_it != not_end; ++not_it)
              {
                Elem * parent = *not_it;

                const dof_id_type parent_idx = parent->id();

                if ((parent_idx >= first_elem_id) &&
                    (parent_idx <  last_elem_id))
                  {
                    const dof_id_type packed_idx = parent_idx - first_elem_id;
                    libmesh_assert_less (packed_idx, parent_processor_ids.size());

                    const processor_id_type parent_pid =
                      parent_processor_ids[packed_idx];

                    libmesh_assert_not_equal_to (parent_pid, DofObject::invalid_processor_id);

                    parent->processor_id() = parent_pid;
                  }
              }
        }
    }

#endif // LIBMESH_ENABLE_AMR
}
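The block loop above bounds memory by reducing an id-indexed array in chunks of communication_blocksize rather than allocating all max_elem_id entries at once. The bare pattern, sketched with placeholder fill and readback steps:

#include "libmesh/dof_object.h"
#include "libmesh/parallel.h"

#include <algorithm>
#include <vector>

using namespace libMesh;

void blocked_min (const Parallel::Communicator & comm,
                  const dof_id_type max_id,
                  const dof_id_type blocksize)
{
  std::vector<processor_id_type> block(std::min(blocksize, max_id));

  for (dof_id_type first = 0; first < max_id; first += blocksize)
    {
      std::fill(block.begin(), block.end(),
                DofObject::invalid_processor_id);

      // ... write locally known values into block[id - first] for ids
      // in [first, std::min(first + blocksize, max_id)) ...

      comm.min(block);   // elementwise global minimum over this chunk

      // ... read the reduced values back out the same way ...
    }
}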
Example #9
void Partitioner::partition_unpartitioned_elements (MeshBase & mesh,
                                                    const unsigned int n_subdomains)
{
  MeshBase::element_iterator       it  = mesh.unpartitioned_elements_begin();
  const MeshBase::element_iterator end = mesh.unpartitioned_elements_end();

  const dof_id_type n_unpartitioned_elements = MeshTools::n_elem (it, end);

  // The unpartitioned elements must exist on all processors.  If the range
  // is empty on one processor it is empty on all, and we can quit right here.
  if (!n_unpartitioned_elements) return;

  // find the target subdomain sizes
  std::vector<dof_id_type> subdomain_bounds(mesh.n_processors());

  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
    {
      dof_id_type tgt_subdomain_size = 0;

      // watch out for the case that n_subdomains < n_processors
      if (pid < n_subdomains)
        {
          tgt_subdomain_size = n_unpartitioned_elements/n_subdomains;

          if (pid < n_unpartitioned_elements%n_subdomains)
            tgt_subdomain_size++;

        }

      //libMesh::out << "pid, #= " << pid << ", " << tgt_subdomain_size << std::endl;
      if (pid == 0)
        subdomain_bounds[0] = tgt_subdomain_size;
      else
        subdomain_bounds[pid] = subdomain_bounds[pid-1] + tgt_subdomain_size;
    }

  libmesh_assert_equal_to (subdomain_bounds.back(), n_unpartitioned_elements);

  // Create the unique mapping for all unpartitioned elements independent of
  // partitioning, i.e. determine the global indexing for all the unpartitioned elements.
  std::vector<dof_id_type> global_indices;

  // By calling this on all processors, a unique range in [0,n_unpartitioned_elements)
  // is constructed.  Only the indices for the elements we pass in are returned in the array.
  MeshCommunication().find_global_indices (mesh.comm(),
                                           MeshTools::bounding_box(mesh), it, end,
                                           global_indices);

  for (dof_id_type cnt=0; it != end; ++it)
    {
      Elem * elem = *it;

      libmesh_assert_less (cnt, global_indices.size());
      const dof_id_type global_index =
        global_indices[cnt++];

      libmesh_assert_less (global_index, subdomain_bounds.back());
      libmesh_assert_less (global_index, n_unpartitioned_elements);

      const processor_id_type subdomain_id =
        cast_int<processor_id_type>
        (std::distance(subdomain_bounds.begin(),
                       std::upper_bound(subdomain_bounds.begin(),
                                        subdomain_bounds.end(),
                                        global_index)));
      libmesh_assert_less (subdomain_id, n_subdomains);

      elem->processor_id() = subdomain_id;
      //libMesh::out << "assigning " << global_index << " to " << subdomain_id << std::endl;
    }
}
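The partition assignment hinges on std::upper_bound over the cumulative subdomain_bounds array. A self-contained illustration with made-up sizes, using only the standard library:

#include <algorithm>
#include <iostream>
#include <iterator>
#include <vector>

int main ()
{
  // Cumulative bounds {4, 8, 10} describe partitions of sizes 4, 4, 2,
  // so indices 0..3 -> 0, 4..7 -> 1, 8..9 -> 2.
  const std::vector<unsigned int> bounds = {4, 8, 10};

  for (unsigned int idx = 0; idx != 10; ++idx)
    {
      const std::size_t part = static_cast<std::size_t>
        (std::distance(bounds.begin(),
                       std::upper_bound(bounds.begin(), bounds.end(), idx)));
      std::cout << "global index " << idx << " -> partition " << part << '\n';
    }
  return 0;
}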
Example #10
void ParmetisPartitioner::_do_repartition (MeshBase & mesh,
                                           const unsigned int n_sbdmns)
{
  libmesh_assert_greater (n_sbdmns, 0);

  // Check for an easy return
  if (n_sbdmns == 1)
    {
      this->single_partition(mesh);
      return;
    }

  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  // What to do if the Parmetis library IS NOT present
#ifndef LIBMESH_HAVE_PARMETIS

  libmesh_here();
  libMesh::err << "ERROR: The library has been built without" << std::endl
               << "Parmetis support.  Using a Metis"          << std::endl
               << "partitioner instead!"                      << std::endl;

  MetisPartitioner mp;

  mp.partition (mesh, n_sbdmns);

  // What to do if the Parmetis library IS present
#else

  // Revert to METIS on one processor.
  if (mesh.n_processors() == 1)
    {
      MetisPartitioner mp;
      mp.partition (mesh, n_sbdmns);
      return;
    }

  LOG_SCOPE("repartition()", "ParmetisPartitioner");

  // Initialize the data structures required by ParMETIS
  this->initialize (mesh, n_sbdmns);

  // Make sure all processors have enough active local elements.
  // Parmetis tends to crash when it's given only a couple elements
  // per partition.
  {
    bool all_have_enough_elements = true;
    for (processor_id_type pid=0; pid<_n_active_elem_on_proc.size(); pid++)
      if (_n_active_elem_on_proc[pid] < MIN_ELEM_PER_PROC)
        all_have_enough_elements = false;

    // Parmetis will not work unless each processor has some
    // elements. Specifically, it will abort when passed a NULL
    // partition array on *any* of the processors.
    if (!all_have_enough_elements)
      {
        // FIXME: revert to METIS, although this requires a serial mesh
        MeshSerializer serialize(mesh);
        MetisPartitioner mp;
        mp.partition (mesh, n_sbdmns);
        return;
      }
  }

  // build the graph corresponding to the mesh
  this->build_graph (mesh);


  // Partition the graph
  std::vector<Parmetis::idx_t> vsize(_pmetis->vwgt.size(), 1);
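  // ParMETIS's 'itr' parameter is the assumed ratio of inter-processor
  // communication cost to data-redistribution cost; a large value like
  // the one below tells the repartitioner to favor a low edge-cut over
  // minimizing data movement.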
  Parmetis::real_t itr = 1000000.0;
  MPI_Comm mpi_comm = mesh.comm().get();

  // Call the ParMETIS adaptive repartitioning method.  This respects the
  // original partitioning when computing the new partitioning so as to
  // minimize the required data redistribution.
  Parmetis::ParMETIS_V3_AdaptiveRepart(_pmetis->vtxdist.empty() ? libmesh_nullptr : &_pmetis->vtxdist[0],
                                       _pmetis->xadj.empty()    ? libmesh_nullptr : &_pmetis->xadj[0],
                                       _pmetis->adjncy.empty()  ? libmesh_nullptr : &_pmetis->adjncy[0],
                                       _pmetis->vwgt.empty()    ? libmesh_nullptr : &_pmetis->vwgt[0],
                                       vsize.empty()            ? libmesh_nullptr : &vsize[0],
                                       libmesh_nullptr,
                                       &_pmetis->wgtflag,
                                       &_pmetis->numflag,
                                       &_pmetis->ncon,
                                       &_pmetis->nparts,
                                       _pmetis->tpwgts.empty()  ? libmesh_nullptr : &_pmetis->tpwgts[0],
                                       _pmetis->ubvec.empty()   ? libmesh_nullptr : &_pmetis->ubvec[0],
                                       &itr,
                                       &_pmetis->options[0],
                                       &_pmetis->edgecut,
                                       _pmetis->part.empty()    ? libmesh_nullptr : &_pmetis->part[0],
                                       &mpi_comm);

  // Assign the returned processor ids
  this->assign_partitioning (mesh);

#endif // #ifndef LIBMESH_HAVE_PARMETIS ... else ...

}
Example #11
void ParmetisPartitioner::assign_partitioning (MeshBase & mesh)
{
  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  const dof_id_type
    first_local_elem = _pmetis->vtxdist[mesh.processor_id()];

  std::vector<std::vector<dof_id_type> >
    requested_ids(mesh.n_processors()),
    requests_to_fill(mesh.n_processors());

  MeshBase::element_iterator elem_it  = mesh.active_elements_begin();
  MeshBase::element_iterator elem_end = mesh.active_elements_end();

  for (; elem_it != elem_end; ++elem_it)
    {
      Elem * elem = *elem_it;

      // we need to get the index from the owning processor
      // (note we cannot assign it now -- we are iterating
      // over elements again and this will be bad!)
      libmesh_assert_less (elem->processor_id(), requested_ids.size());
      requested_ids[elem->processor_id()].push_back(elem->id());
    }

  // Trade with all processors (including self) to get their indices
  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
    {
      // Trade my requests with processor procup and procdown
      const processor_id_type procup = (mesh.processor_id() + pid) % mesh.n_processors();
      const processor_id_type procdown = (mesh.n_processors() +
                                          mesh.processor_id() - pid) % mesh.n_processors();

      mesh.comm().send_receive (procup,   requested_ids[procup],
                                procdown, requests_to_fill[procdown]);

      // we can overwrite these requested ids in-place.
      for (std::size_t i=0; i<requests_to_fill[procdown].size(); i++)
        {
          const dof_id_type requested_elem_index =
            requests_to_fill[procdown][i];

          libmesh_assert(_global_index_by_pid_map.count(requested_elem_index));

          const dof_id_type global_index_by_pid =
            _global_index_by_pid_map[requested_elem_index];

          const dof_id_type local_index =
            global_index_by_pid - first_local_elem;

          libmesh_assert_less (local_index, _pmetis->part.size());
          libmesh_assert_less (local_index, mesh.n_active_local_elem());

          const unsigned int elem_procid =
            static_cast<unsigned int>(_pmetis->part[local_index]);

          libmesh_assert_less (elem_procid, static_cast<unsigned int>(_pmetis->nparts));

          requests_to_fill[procdown][i] = elem_procid;
        }

      // Trade back
      mesh.comm().send_receive (procdown, requests_to_fill[procdown],
                                procup,   requested_ids[procup]);
    }

  // and finally assign the partitioning.
  // note we are iterating in exactly the same order
  // used to build up the request, so we can expect the
  // required entries to be in the proper sequence.
  elem_it  = mesh.active_elements_begin();
  elem_end = mesh.active_elements_end();

  for (std::vector<unsigned int> counters(mesh.n_processors(), 0);
       elem_it != elem_end; ++elem_it)
    {
      Elem * elem = *elem_it;

      const processor_id_type current_pid = elem->processor_id();

      libmesh_assert_less (counters[current_pid], requested_ids[current_pid].size());

      const processor_id_type elem_procid =
        requested_ids[current_pid][counters[current_pid]++];

      libmesh_assert_less (elem_procid, static_cast<unsigned int>(_pmetis->nparts));
      elem->processor_id() = elem_procid;
    }
}
Example #12
void ParmetisPartitioner::initialize (const MeshBase & mesh,
                                      const unsigned int n_sbdmns)
{
  const dof_id_type n_active_local_elem = mesh.n_active_local_elem();

  // Set parameters.
  _pmetis->wgtflag = 2;                                      // weights on vertices only
  _pmetis->ncon    = 1;                                      // one weight per vertex
  _pmetis->numflag = 0;                                      // C-style 0-based numbering
  _pmetis->nparts  = static_cast<Parmetis::idx_t>(n_sbdmns); // number of subdomains to create
  _pmetis->edgecut = 0;                                      // the numbers of edges cut by the
                                                             // partition

  // Initialize data structures for ParMETIS
  _pmetis->vtxdist.resize (mesh.n_processors()+1); std::fill (_pmetis->vtxdist.begin(), _pmetis->vtxdist.end(), 0);
  _pmetis->tpwgts.resize  (_pmetis->nparts);       std::fill (_pmetis->tpwgts.begin(),  _pmetis->tpwgts.end(),  1./_pmetis->nparts);
  _pmetis->ubvec.resize   (_pmetis->ncon);         std::fill (_pmetis->ubvec.begin(),   _pmetis->ubvec.end(),   1.05);
  _pmetis->part.resize    (n_active_local_elem);   std::fill (_pmetis->part.begin(),    _pmetis->part.end(), 0);
  _pmetis->options.resize (5);
  _pmetis->vwgt.resize    (n_active_local_elem);

  // Set the options
  _pmetis->options[0] = 1;  // don't use default options
  _pmetis->options[1] = 0;  // default (level of timing)
  _pmetis->options[2] = 15; // random seed (default)
  _pmetis->options[3] = 2;  // processor distribution and subdomain distribution are decoupled

  // Find the number of active elements on each processor.  We cannot use
  // mesh.n_active_elem_on_proc(pid) since that only returns the number of
  // elements assigned to pid which are currently stored on the calling
  // processor. This will not in general be correct for parallel meshes
  // when (pid!=mesh.processor_id()).
  _n_active_elem_on_proc.resize(mesh.n_processors());
  mesh.comm().allgather(n_active_local_elem, _n_active_elem_on_proc);

  // count the total number of active elements in the mesh.  Note we cannot
  // use mesh.n_active_elem() in general since this only returns the number
  // of active elements which are stored on the calling processor.
  // We should not use n_active_elem for any allocation because that will
  // be inherently unscalable, but it can be useful for libmesh_assertions.
  dof_id_type n_active_elem=0;

  // Set up the vtxdist array.  This will be the same on each processor.
  // ***** Consult the Parmetis documentation. *****
  libmesh_assert_equal_to (_pmetis->vtxdist.size(),
                           cast_int<std::size_t>(mesh.n_processors()+1));
  libmesh_assert_equal_to (_pmetis->vtxdist[0], 0);

  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
    {
      _pmetis->vtxdist[pid+1] = _pmetis->vtxdist[pid] + _n_active_elem_on_proc[pid];
      n_active_elem += _n_active_elem_on_proc[pid];
    }
  libmesh_assert_equal_to (_pmetis->vtxdist.back(), static_cast<Parmetis::idx_t>(n_active_elem));

  // ParMetis expects the elements to be numbered in contiguous blocks
  // by processor, i.e. [0, ne0), [ne0, ne0+ne1), ...
  // Since we only partition active elements we should have no expectation
  // that we currently have such a distribution.  So we need to create it.
  // Also, at the same time we are going to map all the active elements into a globally
  // unique range [0,n_active_elem) which is *independent* of the current partitioning.
  // This can be fed to ParMetis as the initial partitioning of the subdomains (decoupled
  // from the partitioning of the objects themselves).  This allows us to get the same
  // resultant partitioning independent of the input partitioning.
  MeshTools::BoundingBox bbox =
    MeshTools::bounding_box(mesh);

  _global_index_by_pid_map.clear();

  // Maps active element ids into a contiguous range independent of partitioning.
  // (only needs local scope)
  vectormap<dof_id_type, dof_id_type> global_index_map;

  {
    std::vector<dof_id_type> global_index;

    // create the mapping which is contiguous by processor
    dof_id_type pid_offset=0;
    for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
      {
        MeshBase::const_element_iterator       it  = mesh.active_pid_elements_begin(pid);
        const MeshBase::const_element_iterator end = mesh.active_pid_elements_end(pid);

        // note that we may not have all (or any!) the active elements which belong on this processor,
        // but by calling this on all processors a unique range in [0,_n_active_elem_on_proc[pid])
        // is constructed.  Only the indices for the elements we pass in are returned in the array.
        MeshCommunication().find_global_indices (mesh.comm(),
                                                 bbox, it, end,
                                                 global_index);

        for (dof_id_type cnt=0; it != end; ++it)
          {
            const Elem * elem = *it;
            libmesh_assert (!_global_index_by_pid_map.count(elem->id()));
            libmesh_assert_less (cnt, global_index.size());
            libmesh_assert_less (global_index[cnt], _n_active_elem_on_proc[pid]);

            _global_index_by_pid_map.insert(std::make_pair(elem->id(), global_index[cnt++] + pid_offset));
          }

        pid_offset += _n_active_elem_on_proc[pid];
      }

    // create the unique mapping for all active elements independent of partitioning
    {
      MeshBase::const_element_iterator       it  = mesh.active_elements_begin();
      const MeshBase::const_element_iterator end = mesh.active_elements_end();

      // By calling this on all processors, a unique range in [0,n_active_elem) is constructed.
      // Only the indices for the elements we pass in are returned in the array.
      MeshCommunication().find_global_indices (mesh.comm(),
                                               bbox, it, end,
                                               global_index);

      for (dof_id_type cnt=0; it != end; ++it)
        {
          const Elem * elem = *it;
          libmesh_assert (!global_index_map.count(elem->id()));
          libmesh_assert_less (cnt, global_index.size());
          libmesh_assert_less (global_index[cnt], n_active_elem);

          global_index_map.insert(std::make_pair(elem->id(), global_index[cnt++]));
        }
    }
    // really, shouldn't be close!
    libmesh_assert_less_equal (global_index_map.size(), n_active_elem);
    libmesh_assert_less_equal (_global_index_by_pid_map.size(), n_active_elem);

    // At this point the two maps should be the same size.  If they are not
    // then the number of active elements is not the same as the sum over all
    // processors of the number of active elements per processor, which means
    // there must be some unpartitioned objects out there.
    if (global_index_map.size() != _global_index_by_pid_map.size())
      libmesh_error_msg("ERROR:  ParmetisPartitioner cannot handle unpartitioned objects!");
  }

  // Finally, we need to initialize the vertex (partition) weights and the initial subdomain
  // mapping.  The subdomain mapping will be independent of the processor mapping, and is
  // defined by a simple mapping of the global indices we just found.
  {
    std::vector<dof_id_type> subdomain_bounds(mesh.n_processors());

    const dof_id_type first_local_elem = _pmetis->vtxdist[mesh.processor_id()];

    for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
      {
        dof_id_type tgt_subdomain_size = 0;

        // watch out for the case that n_subdomains < n_processors
        if (pid < static_cast<unsigned int>(_pmetis->nparts))
          {
            tgt_subdomain_size = n_active_elem/std::min
              (cast_int<Parmetis::idx_t>(mesh.n_processors()), _pmetis->nparts);

            if (pid < n_active_elem%_pmetis->nparts)
              tgt_subdomain_size++;
          }
        if (pid == 0)
          subdomain_bounds[0] = tgt_subdomain_size;
        else
          subdomain_bounds[pid] = subdomain_bounds[pid-1] + tgt_subdomain_size;
      }

    libmesh_assert_equal_to (subdomain_bounds.back(), n_active_elem);

    MeshBase::const_element_iterator       elem_it  = mesh.active_local_elements_begin();
    const MeshBase::const_element_iterator elem_end = mesh.active_local_elements_end();

    for (; elem_it != elem_end; ++elem_it)
      {
        const Elem * elem = *elem_it;

        libmesh_assert (_global_index_by_pid_map.count(elem->id()));
        const dof_id_type global_index_by_pid =
          _global_index_by_pid_map[elem->id()];
        libmesh_assert_less (global_index_by_pid, n_active_elem);

        const dof_id_type local_index =
          global_index_by_pid - first_local_elem;

        libmesh_assert_less (local_index, n_active_local_elem);
        libmesh_assert_less (local_index, _pmetis->vwgt.size());

        // TODO:[BSK] maybe there is a better weight?
        _pmetis->vwgt[local_index] = elem->n_nodes();

        // find the subdomain this element belongs in
        libmesh_assert (global_index_map.count(elem->id()));
        const dof_id_type global_index =
          global_index_map[elem->id()];

        libmesh_assert_less (global_index, subdomain_bounds.back());

        const unsigned int subdomain_id =
          std::distance(subdomain_bounds.begin(),
                        std::lower_bound(subdomain_bounds.begin(),
                                         subdomain_bounds.end(),
                                         global_index));
        libmesh_assert_less (subdomain_id, static_cast<unsigned int>(_pmetis->nparts));
        libmesh_assert_less (local_index, _pmetis->part.size());

        _pmetis->part[local_index] = subdomain_id;
      }
  }
}
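The vtxdist array built above is ParMETIS's standard vertex-distribution descriptor: entry p is the first global index owned by processor p, so consecutive entries bracket each processor's range. A tiny standalone illustration:

#include <iostream>
#include <vector>

int main ()
{
  // Three processors owning 5, 3 and 4 active elements respectively.
  const std::vector<int> n_elem_on_proc = {5, 3, 4};

  std::vector<int> vtxdist(n_elem_on_proc.size() + 1, 0);
  for (std::size_t p = 0; p < n_elem_on_proc.size(); ++p)
    vtxdist[p + 1] = vtxdist[p] + n_elem_on_proc[p];

  // Prints "0 5 8 12": processor p owns indices [vtxdist[p], vtxdist[p+1]).
  for (int v : vtxdist)
    std::cout << v << ' ';
  std::cout << '\n';
  return 0;
}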
Example #13
void MetisPartitioner::partition_range(MeshBase & mesh,
                                       MeshBase::element_iterator beg,
                                       MeshBase::element_iterator end,
                                       unsigned int n_pieces)
{
  libmesh_assert_greater (n_pieces, 0);

  // We don't yet support distributed meshes with this Partitioner
  if (!mesh.is_serial())
    libmesh_not_implemented();

  // Check for an easy return
  if (n_pieces == 1)
    {
      this->single_partition_range (beg, end);
      return;
    }

  // What to do if the Metis library IS NOT present
#ifndef LIBMESH_HAVE_METIS

  libmesh_here();
  libMesh::err << "ERROR: The library has been built without"    << std::endl
               << "Metis support.  Using a space-filling curve"  << std::endl
               << "partitioner instead!"                         << std::endl;

  SFCPartitioner sfcp;
  sfcp.partition_range (mesh, beg, end, n_pieces);

  // What to do if the Metis library IS present
#else

  LOG_SCOPE("partition_range()", "MetisPartitioner");

  const dof_id_type n_range_elem = std::distance(beg, end);

  // Metis will only consider the elements in the range.
  // We need to map the range element ids into a
  // contiguous range.  Further, we want the unique range indexing to be
  // independent of the element ordering, otherwise a circular dependency
  // can result in which the partitioning depends on the ordering which
  // depends on the partitioning...
  vectormap<dof_id_type, dof_id_type> global_index_map;
  global_index_map.reserve (n_range_elem);

  {
    std::vector<dof_id_type> global_index;

    MeshCommunication().find_global_indices (mesh.comm(),
                                             MeshTools::create_bounding_box(mesh),
                                             beg, end, global_index);

    libmesh_assert_equal_to (global_index.size(), n_range_elem);

    MeshBase::element_iterator it = beg;
    for (std::size_t cnt=0; it != end; ++it)
      {
        const Elem * elem = *it;

        global_index_map.insert (std::make_pair(elem->id(), global_index[cnt++]));
      }
    libmesh_assert_equal_to (global_index_map.size(), n_range_elem);
  }

  // If we have boundary elements in this mesh, we want to account for
  // the connectivity between them and interior elements.  We can find
  // interior elements from boundary elements, but we need to build up
  // a lookup map to do the reverse.
  typedef std::unordered_multimap<const Elem *, const Elem *> map_type;
  map_type interior_to_boundary_map;

  {
    MeshBase::element_iterator it = beg;
    for (; it != end; ++it)
      {
        const Elem * elem = *it;

        // If we don't have an interior_parent then no interior
        // element will need to look us up.
        if ((elem->dim() >= LIBMESH_DIM) ||
            !elem->interior_parent())
          continue;

        // get all relevant interior elements
        std::set<const Elem *> neighbor_set;
        elem->find_interior_neighbors(neighbor_set);

        std::set<const Elem *>::iterator n_it = neighbor_set.begin();
        for (; n_it != neighbor_set.end(); ++n_it)
          {
            // FIXME - non-const versions of the std::set<const Elem
            // *> returning methods would be nice
            Elem * neighbor = const_cast<Elem *>(*n_it);

#if defined(LIBMESH_HAVE_UNORDERED_MULTIMAP) ||         \
  defined(LIBMESH_HAVE_TR1_UNORDERED_MULTIMAP) ||       \
  defined(LIBMESH_HAVE_HASH_MULTIMAP) ||                \
  defined(LIBMESH_HAVE_EXT_HASH_MULTIMAP)
            interior_to_boundary_map.insert(std::make_pair(neighbor, elem));
#else
            interior_to_boundary_map.insert(interior_to_boundary_map.begin(),
                                            std::make_pair(neighbor, elem));
#endif
          }
      }
  }

  // Data structure that Metis will fill up on processor 0 and broadcast.
  std::vector<Metis::idx_t> part(n_range_elem);

  // Invoke METIS, but only on processor 0.
  // Then broadcast the resulting decomposition
  if (mesh.processor_id() == 0)
    {
      // Data structures and parameters needed only on processor 0 by Metis.
      // std::vector<Metis::idx_t> options(5);
      std::vector<Metis::idx_t> vwgt(n_range_elem);

      Metis::idx_t
        n = static_cast<Metis::idx_t>(n_range_elem),   // number of "nodes" (elements) in the graph
        // wgtflag = 2,                                // weights on vertices only, none on edges
        // numflag = 0,                                // C-style 0-based numbering
        nparts  = static_cast<Metis::idx_t>(n_pieces), // number of subdomains to create
        edgecut = 0;                                   // the numbers of edges cut by the resulting partition

      // Set the options
      // options[0] = 0; // use default options

      // build the graph
      METIS_CSR_Graph<Metis::idx_t> csr_graph;

      csr_graph.offsets.resize(n_range_elem + 1, 0);

      // Local scope for these
      {
        // build the graph in CSR format.  Note that
        // the edges in the graph will correspond to
        // face neighbors

#ifdef LIBMESH_ENABLE_AMR
        std::vector<const Elem *> neighbors_offspring;
#endif

#ifndef NDEBUG
        std::size_t graph_size=0;
#endif

        // (1) first pass - get the row sizes for each element by counting the number
        // of face neighbors.  Also populate the vwght array if necessary
        MeshBase::element_iterator it = beg;
        for (; it != end; ++it)
          {
            const Elem * elem = *it;

            const dof_id_type elem_global_index =
              global_index_map[elem->id()];

            libmesh_assert_less (elem_global_index, vwgt.size());

            // maybe there is a better weight?
            // The weight is used to define what a balanced graph is
            if (!_weights)
              vwgt[elem_global_index] = elem->n_nodes();
            else
              vwgt[elem_global_index] = static_cast<Metis::idx_t>((*_weights)[elem->id()]);

            unsigned int num_neighbors = 0;

            // Loop over the element's neighbors.  An element
            // adjacency corresponds to a face neighbor
            for (auto neighbor : elem->neighbor_ptr_range())
              {
                if (neighbor != libmesh_nullptr)
                  {
                    // If the neighbor is active, but is not in the
                    // range of elements being partitioned, treat it
                    // as a NULL neighbor.
                    if (neighbor->active() && !global_index_map.count(neighbor->id()))
                      continue;

                    // If the neighbor is active treat it
                    // as a connection
                    if (neighbor->active())
                      num_neighbors++;

#ifdef LIBMESH_ENABLE_AMR

                    // Otherwise we need to find all of the
                    // neighbor's children that are connected to
                    // us and add them
                    else
                      {
                        // The side of the neighbor to which
                        // we are connected
                        const unsigned int ns =
                          neighbor->which_neighbor_am_i (elem);
                        libmesh_assert_less (ns, neighbor->n_neighbors());

                        // Get all the active children (& grandchildren, etc...)
                        // of the neighbor.

                        // FIXME - this is the wrong thing, since we
                        // should be getting the active family tree on
                        // our side only.  But adding too many graph
                        // links may cause hanging nodes to tend to be
                        // on partition interiors, which would reduce
                        // communication overhead for constraint
                        // equations, so we'll leave it.
                        neighbor->active_family_tree (neighbors_offspring);

                        // Get all the neighbor's children that
                        // live on that side and are thus connected
                        // to us
                        for (std::size_t nc=0; nc<neighbors_offspring.size(); nc++)
                          {
                            const Elem * child =
                              neighbors_offspring[nc];

                            // Skip neighbor offspring which are not in the range of elements being partitioned.
                            if (!global_index_map.count(child->id()))
                              continue;

                            // This does not assume a level-1 mesh.
                            // Note that since children have sides numbered
                            // coincident with the parent, this is a sufficient test.
                            if (child->neighbor_ptr(ns) == elem)
                              {
                                libmesh_assert (child->active());
                                num_neighbors++;
                              }
                          }
                      }

#endif /* ifdef LIBMESH_ENABLE_AMR */

                  }
              }

            // Check for any interior neighbors
            if ((elem->dim() < LIBMESH_DIM) && elem->interior_parent())
              {
                // get all relevant interior elements
                std::set<const Elem *> neighbor_set;
                elem->find_interior_neighbors(neighbor_set);

                num_neighbors += neighbor_set.size();
              }

            // Check for any boundary neighbors
            typedef map_type::iterator map_it_type;
            std::pair<map_it_type, map_it_type>
              bounds = interior_to_boundary_map.equal_range(elem);
            num_neighbors += std::distance(bounds.first, bounds.second);

            csr_graph.prep_n_nonzeros(elem_global_index, num_neighbors);
#ifndef NDEBUG
            graph_size += num_neighbors;
#endif
          }

        csr_graph.prepare_for_use();

        // (2) second pass - fill the compressed adjacency array
        it = beg;

        for (; it != end; ++it)
          {
            const Elem * elem = *it;

            const dof_id_type elem_global_index =
              global_index_map[elem->id()];

            unsigned int connection=0;

            // Loop over the element's neighbors.  An element
            // adjacency corresponds to a face neighbor
            for (auto neighbor : elem->neighbor_ptr_range())
              {
                if (neighbor != libmesh_nullptr)
                  {
                    // If the neighbor is active, but is not in the
                    // range of elements being partitioned, treat it
                    // as a NULL neighbor.
                    if (neighbor->active() && !global_index_map.count(neighbor->id()))
                      continue;

                    // If the neighbor is active treat it
                    // as a connection
                    if (neighbor->active())
                      csr_graph(elem_global_index, connection++) = global_index_map[neighbor->id()];

#ifdef LIBMESH_ENABLE_AMR

                    // Otherwise we need to find all of the
                    // neighbor's children that are connected to
                    // us and add them
                    else
                      {
                        // The side of the neighbor to which
                        // we are connected
                        const unsigned int ns =
                          neighbor->which_neighbor_am_i (elem);
                        libmesh_assert_less (ns, neighbor->n_neighbors());

                        // Get all the active children (& grandchildren, etc...)
                        // of the neighbor.
                        neighbor->active_family_tree (neighbors_offspring);

                        // Get all the neighbor's children that
                        // live on that side and are thus connected
                        // to us
                        for (std::size_t nc=0; nc<neighbors_offspring.size(); nc++)
                          {
                            const Elem * child =
                              neighbors_offspring[nc];

                            // Skip neighbor offspring which are not in the range of elements being partitioned.
                            if (!global_index_map.count(child->id()))
                              continue;

                            // This does not assume a level-1 mesh.
                            // Note that since children have sides numbered
                            // coincident with the parent, this is a sufficient test.
                            if (child->neighbor_ptr(ns) == elem)
                              {
                                libmesh_assert (child->active());

                                csr_graph(elem_global_index, connection++) = global_index_map[child->id()];
                              }
                          }
                      }

#endif /* ifdef LIBMESH_ENABLE_AMR */

                  }
              }

            if ((elem->dim() < LIBMESH_DIM) &&
                elem->interior_parent())
              {
                // get all relevant interior elements
                std::set<const Elem *> neighbor_set;
                elem->find_interior_neighbors(neighbor_set);

                std::set<const Elem *>::iterator n_it = neighbor_set.begin();
                for (; n_it != neighbor_set.end(); ++n_it)
                  {
                    const Elem * neighbor = *n_it;

                    // Not all interior neighbors are necessarily in
                    // the same Mesh (hence not in the global_index_map).
                    // This will be the case when partitioning a
                    // BoundaryMesh, whose elements all have
                    // interior_parents() that belong to some other
                    // Mesh.
                    const Elem * queried_elem = mesh.query_elem_ptr(neighbor->id());

                    // Compare the neighbor and the queried_elem
                    // pointers, make sure they are the same.
                    if (queried_elem && queried_elem == neighbor)
                      {
                        vectormap<dof_id_type, dof_id_type>::iterator global_index_map_it =
                          global_index_map.find(neighbor->id());

                        // If the interior_neighbor is in the Mesh but
                        // not in the global_index_map, we have other issues.
                        if (global_index_map_it == global_index_map.end())
                          libmesh_error_msg("Interior neighbor with id " << neighbor->id() << " not found in global_index_map.");

                        else
                          csr_graph(elem_global_index, connection++) = global_index_map_it->second;
                      }
                  }
              }

            // Check for any boundary neighbors
            for (const auto & pr : as_range(interior_to_boundary_map.equal_range(elem)))
              {
                const Elem * neighbor = pr.second;
                csr_graph(elem_global_index, connection++) =
                  global_index_map[neighbor->id()];
              }
          }

        // We create a non-empty vals for a disconnected graph, to
        // work around a segfault from METIS.
        libmesh_assert_equal_to (csr_graph.vals.size(),
                                 std::max(graph_size, std::size_t(1)));
      } // done building the graph

      Metis::idx_t ncon = 1;

      // Select which type of partitioning to create

      // Use recursive if the number of partitions is less than or equal to 8
      if (n_pieces <= 8)
        Metis::METIS_PartGraphRecursive(&n,
                                        &ncon,
                                        &csr_graph.offsets[0],
                                        &csr_graph.vals[0],
                                        &vwgt[0],
                                        libmesh_nullptr,
                                        libmesh_nullptr,
                                        &nparts,
                                        libmesh_nullptr,
                                        libmesh_nullptr,
                                        libmesh_nullptr,
                                        &edgecut,
                                        &part[0]);

      // Otherwise  use kway
      else
        Metis::METIS_PartGraphKway(&n,
                                   &ncon,
                                   &csr_graph.offsets[0],
                                   &csr_graph.vals[0],
                                   &vwgt[0],
                                   libmesh_nullptr,
                                   libmesh_nullptr,
                                   &nparts,
                                   libmesh_nullptr,
                                   libmesh_nullptr,
                                   libmesh_nullptr,
                                   &edgecut,
                                   &part[0]);

    } // end processor 0 part

  // Broadcast the resulting partition
  mesh.comm().broadcast(part);

  // Assign the returned processor ids.  The part array contains
  // the processor id for each active element, but in terms of
  // the contiguous indexing we defined above
  {
    MeshBase::element_iterator it = beg;
    for (; it!=end; ++it)
      {
        Elem * elem = *it;

        libmesh_assert (global_index_map.count(elem->id()));

        const dof_id_type elem_global_index =
          global_index_map[elem->id()];

        libmesh_assert_less (elem_global_index, part.size());
        const processor_id_type elem_procid =
          static_cast<processor_id_type>(part[elem_global_index]);

        elem->processor_id() = elem_procid;
      }
  }
#endif
}
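METIS consumes the element-dual graph in CSR form: offsets[i] and offsets[i+1] bracket row i of the flat adjacency array vals, which is what csr_graph assembles above. A minimal standalone illustration for a chain of four face-adjacent elements:

#include <iostream>
#include <vector>

int main ()
{
  // Chain 0-1-2-3: the end elements have one neighbor, the middle two.
  const std::vector<int> offsets = {0, 1, 3, 5, 6};
  const std::vector<int> vals    = {1, 0, 2, 1, 3, 2};

  for (std::size_t i = 0; i + 1 < offsets.size(); ++i)
    {
      std::cout << "element " << i << " neighbors:";
      for (int j = offsets[i]; j < offsets[i + 1]; ++j)
        std::cout << ' ' << vals[j];
      std::cout << '\n';
    }
  return 0;
}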
Example #14
void add_cube_convex_hull_to_mesh(MeshBase& mesh, Point lower_limit, Point upper_limit)
{
#ifdef LIBMESH_HAVE_TETGEN
  SerialMesh cube_mesh(mesh.comm(),3);

  unsigned n_elem = 1;

  MeshTools::Generation::build_cube(cube_mesh,
                                    n_elem,n_elem,n_elem, // n. elements in each direction
                                    lower_limit(0), upper_limit(0),
                                    lower_limit(1), upper_limit(1),
                                    lower_limit(2), upper_limit(2),
                                    HEX8);

  // The pointset_convexhull() algorithm will ignore the Hex8s
  // in the Mesh, and just construct the triangulation
  // of the convex hull.
  TetGenMeshInterface t(cube_mesh);
  t.pointset_convexhull();

  // Now add all nodes from the boundary of the cube_mesh to the input mesh.

  // Map from "node id in cube_mesh" -> "node id in mesh".  Initially inserted
  // with a dummy value, later to be assigned a value by the input mesh.
  std::map<unsigned,unsigned> node_id_map;
  typedef std::map<unsigned,unsigned>::iterator iterator;

  {
    MeshBase::element_iterator it = cube_mesh.elements_begin();
    const MeshBase::element_iterator end = cube_mesh.elements_end();
    for ( ; it != end; ++it)
      {
        Elem* elem = *it;

        for (unsigned s=0; s<elem->n_sides(); ++s)
          if (elem->neighbor(s) == NULL)
            {
              // Add the node IDs of this side to the set
              AutoPtr<Elem> side = elem->side(s);

              for (unsigned n=0; n<side->n_nodes(); ++n)
                node_id_map.insert( std::make_pair(side->node(n), /*dummy_value=*/0) );
            }
      }
  }

  // For each node in the map, insert it into the input mesh and keep
  // track of the ID assigned.
  for (iterator it=node_id_map.begin(); it != node_id_map.end(); ++it)
    {
      // Id of the node in the cube mesh
      unsigned id = (*it).first;

      // Pointer to node in the cube mesh
      Node* old_node = cube_mesh.node_ptr(id);

      // Add geometric point to input mesh
      Node* new_node = mesh.add_point ( *old_node );

      // Track ID value of new_node in map
      (*it).second = new_node->id();
    }

  // With the points added and the map data structure in place, we are
  // ready to add each TRI3 element of the cube_mesh to the input Mesh
  // with proper node assignments
  {
    MeshBase::element_iterator       el     = cube_mesh.elements_begin();
    const MeshBase::element_iterator end_el = cube_mesh.elements_end();

    for (; el != end_el; ++el)
      {
        Elem* old_elem = *el;

        if (old_elem->type() == TRI3)
          {
            Elem* new_elem = mesh.add_elem(new Tri3);

            // Assign nodes in new elements.  Since this is an example,
            // we'll do it in several steps.
            for (unsigned i=0; i<old_elem->n_nodes(); ++i)
              {
                // Locate old node ID in the map
                iterator it = node_id_map.find(old_elem->node(i));

                // Check for not found
                if (it == node_id_map.end())
                  {
                    libMesh::err << "Node id " << old_elem->node(i) << " not found in map!" << std::endl;
                    libmesh_error();
                  }

                // Mapping to node ID in input mesh
                unsigned new_node_id = (*it).second;

                // Node pointer assigned from input mesh
                new_elem->set_node(i) = mesh.node_ptr(new_node_id);
              }
          }
      }
  }
#endif // LIBMESH_HAVE_TETGEN
}
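A hypothetical driver for the function above, using the same era's API; the LibMeshInit setup and the prepare_for_use() call are standard boilerplate, not taken from the example:

#include "libmesh/libmesh.h"
#include "libmesh/serial_mesh.h"
#include "libmesh/point.h"

using namespace libMesh;

int main (int argc, char ** argv)
{
  LibMeshInit init(argc, argv);

  // Build the hull into an otherwise empty 3D mesh over the unit cube.
  SerialMesh mesh(init.comm(), 3);
  add_cube_convex_hull_to_mesh(mesh, Point(0.,0.,0.), Point(1.,1.,1.));
  mesh.prepare_for_use();

  return 0;
}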