Example #1
void Partitioner::single_partition_range (MeshBase::element_iterator it,
                                          MeshBase::element_iterator end)
{
  LOG_SCOPE("single_partition_range()", "Partitioner");

  for ( ; it != end; ++it)
    {
      Elem * elem = *it;
      elem->processor_id() = 0;

      // Assign all this element's nodes to processor 0 as well.
      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        elem->node_ptr(n)->processor_id() = 0;
    }
}
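
A brief usage sketch, not taken from the listing above: since single_partition_range() operates on an arbitrary element range, a hypothetical Partitioner subclass (here called MyPartitioner, an assumed name) could handle the trivial single-processor case by handing it the mesh's full element range.

void MyPartitioner::_do_partition (MeshBase & mesh,
                                   const unsigned int n)
{
  // With only one partition requested, everything (elements and their
  // nodes) can simply be assigned to processor 0.
  if (n == 1)
    this->single_partition_range (mesh.elements_begin(),
                                  mesh.elements_end());
}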
Example #2
void Partitioner::set_node_processor_ids(MeshBase & mesh)
{
  LOG_SCOPE("set_node_processor_ids()","Partitioner");

  // This function must be run on all processors at once
  libmesh_parallel_only(mesh.comm());

  // If we have any unpartitioned elements at this
  // stage there is a problem
  libmesh_assert (MeshTools::n_elem(mesh.unpartitioned_elements_begin(),
                                    mesh.unpartitioned_elements_end()) == 0);


  //   const dof_id_type orig_n_local_nodes = mesh.n_local_nodes();

  //   libMesh::err << "[" << mesh.processor_id() << "]: orig_n_local_nodes="
  //     << orig_n_local_nodes << std::endl;

  // Build up request sets.  Each node is currently owned by a processor because
  // it is connected to an element owned by that processor.  However, during the
  // repartitioning phase that element may have been assigned a new processor id, but
  // it is still resident on the original processor.  We need to know where to look
  // for new ids before assigning new ids, otherwise we may be asking the wrong processors
  // for the wrong information.
  //
  // The only remaining issue is what to do with unpartitioned nodes.  Since they are required
  // to live on all processors we can simply rely on ourselves to number them properly.
  std::vector<std::vector<dof_id_type> >
    requested_node_ids(mesh.n_processors());

  // Loop over all the nodes, count the ones on each processor.  We can skip ourselves
  std::vector<dof_id_type> ghost_nodes_from_proc(mesh.n_processors(), 0);

  MeshBase::node_iterator       node_it  = mesh.nodes_begin();
  const MeshBase::node_iterator node_end = mesh.nodes_end();

  for (; node_it != node_end; ++node_it)
    {
      Node * node = *node_it;
      libmesh_assert(node);
      const processor_id_type current_pid = node->processor_id();
      if (current_pid != mesh.processor_id() &&
          current_pid != DofObject::invalid_processor_id)
        {
          libmesh_assert_less (current_pid, ghost_nodes_from_proc.size());
          ghost_nodes_from_proc[current_pid]++;
        }
    }

  // We know how many objects live on each processor, so reserve()
  // space for each.
  for (processor_id_type pid=0; pid != mesh.n_processors(); ++pid)
    requested_node_ids[pid].reserve(ghost_nodes_from_proc[pid]);

  // We need to get the new pid for each node from the processor
  // which *currently* owns the node.  We can safely skip ourselves
  for (node_it = mesh.nodes_begin(); node_it != node_end; ++node_it)
    {
      Node * node = *node_it;
      libmesh_assert(node);
      const processor_id_type current_pid = node->processor_id();
      if (current_pid != mesh.processor_id() &&
          current_pid != DofObject::invalid_processor_id)
        {
          libmesh_assert_less (current_pid, requested_node_ids.size());
          libmesh_assert_less (requested_node_ids[current_pid].size(),
                               ghost_nodes_from_proc[current_pid]);
          requested_node_ids[current_pid].push_back(node->id());
        }

      // Unset any previously-set node processor ids
      node->invalidate_processor_id();
    }

  // Loop over all the active elements
  MeshBase::element_iterator       elem_it  = mesh.active_elements_begin();
  const MeshBase::element_iterator elem_end = mesh.active_elements_end();

  for ( ; elem_it != elem_end; ++elem_it)
    {
      Elem * elem = *elem_it;
      libmesh_assert(elem);

      libmesh_assert_not_equal_to (elem->processor_id(), DofObject::invalid_processor_id);

      // For each node, set the processor ID to the min of
      // its current value and this Element's processor id.
      //
      // TODO: we would probably get better parallel partitioning if
      // we did something like "min for even numbered nodes, max for
      // odd numbered".  We'd need to be careful about how that would
      // affect solution ordering for I/O, though.
      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        elem->node_ptr(n)->processor_id() = std::min(elem->node_ptr(n)->processor_id(),
                                                     elem->processor_id());
    }

  // And loop over the subactive elements, but don't reassign
  // nodes that are already active on another processor.
  MeshBase::element_iterator       sub_it  = mesh.subactive_elements_begin();
  const MeshBase::element_iterator sub_end = mesh.subactive_elements_end();

  for ( ; sub_it != sub_end; ++sub_it)
    {
      Elem * elem = *sub_it;
      libmesh_assert(elem);

      libmesh_assert_not_equal_to (elem->processor_id(), DofObject::invalid_processor_id);

      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        if (elem->node_ptr(n)->processor_id() == DofObject::invalid_processor_id)
          elem->node_ptr(n)->processor_id() = elem->processor_id();
    }

  // Same for the inactive elements -- we will have already gotten most of these
  // nodes, *except* for the case of a parent with a subset of children which are
  // ghost elements.  In that case some of the parent nodes will not have been
  // properly handled yet
  MeshBase::element_iterator       not_it  = mesh.not_active_elements_begin();
  const MeshBase::element_iterator not_end = mesh.not_active_elements_end();

  for ( ; not_it != not_end; ++not_it)
    {
      Elem * elem = *not_it;
      libmesh_assert(elem);

      libmesh_assert_not_equal_to (elem->processor_id(), DofObject::invalid_processor_id);

      for (unsigned int n=0; n<elem->n_nodes(); ++n)
        if (elem->node_ptr(n)->processor_id() == DofObject::invalid_processor_id)
          elem->node_ptr(n)->processor_id() = elem->processor_id();
    }

  // We can't assert that all nodes are connected to elements, because
  // a DistributedMesh with NodeConstraints might have pulled in some
  // remote nodes solely for evaluating those constraints.
  // MeshTools::libmesh_assert_connected_nodes(mesh);

  // For such nodes, we'll do a sanity check later when making sure
  // that we successfully reset their processor ids to something
  // valid.

  // Next set node ids from other processors, excluding self
  for (processor_id_type p=1; p != mesh.n_processors(); ++p)
    {
      // Trade my requests with processor procup and procdown
      processor_id_type procup = cast_int<processor_id_type>
        ((mesh.processor_id() + p) % mesh.n_processors());
      processor_id_type procdown = cast_int<processor_id_type>
        ((mesh.n_processors() + mesh.processor_id() - p) %
         mesh.n_processors());
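
      // For illustration (not library output): with 4 processors and
      // mesh.processor_id() == 1, the sweeps p = 1, 2, 3 give
      // procup = 2, 3, 0 and procdown = 0, 3, 2, so every other
      // processor is contacted exactly once in each direction.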
      std::vector<dof_id_type> request_to_fill;
      mesh.comm().send_receive(procup, requested_node_ids[procup],
                               procdown, request_to_fill);

      // Fill those requests in-place
      for (std::size_t i=0; i != request_to_fill.size(); ++i)
        {
          Node & node = mesh.node_ref(request_to_fill[i]);
          const processor_id_type new_pid = node.processor_id();

          // We may have an invalid processor_id() on nodes that have been
          // "detached" from coarsened-away elements but that have not yet
          // themselves been removed.
          // libmesh_assert_not_equal_to (new_pid, DofObject::invalid_processor_id);
          //
          // The correct test would compare against n_partitions(), since the
          // number of partitions may not equal the number of processors:
          // libmesh_assert_less (new_pid, mesh.n_partitions());
          request_to_fill[i] = new_pid;
        }

      // Trade back the results
      std::vector<dof_id_type> filled_request;
      mesh.comm().send_receive(procdown, request_to_fill,
                               procup,   filled_request);
      libmesh_assert_equal_to (filled_request.size(), requested_node_ids[procup].size());

      // And copy the id changes we've now been informed of
      for (std::size_t i=0; i != filled_request.size(); ++i)
        {
          Node & node = mesh.node_ref(requested_node_ids[procup][i]);

          // this is the correct test -- the number of partitions may
          // not equal the number of processors

          // But: we may have an invalid processor_id() on nodes that
          // have been "detatched" from coarsened-away elements but
          // that have not yet themselves been removed.
          // libmesh_assert_less (filled_request[i], mesh.n_partitions());

          node.processor_id(cast_int<processor_id_type>(filled_request[i]));
        }
    }

#ifdef DEBUG
  MeshTools::libmesh_assert_valid_procids<Node>(mesh);
#endif
}
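
A minimal calling sketch (the wrapper name is an assumption, not library code): set_node_processor_ids() is a collective operation, so it has to be reached on every processor, and only after all elements hold valid processor ids.

void finish_repartitioning (MeshBase & mesh)
{
  // Assumed precondition: some Partitioner has already assigned a valid
  // processor id to every element of "mesh" on every rank.

  // Reconcile node ownership with the new element ownership; this call
  // communicates, so all processors must make it together.
  Partitioner::set_node_processor_ids (mesh);
}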
Example #3
void MeshTools::Subdivision::add_boundary_ghosts(MeshBase & mesh)
{
  static const Real tol = 1e-5;

  // add the mirrored ghost elements (without using iterators, because the mesh is modified during the loop)
  std::vector<Tri3Subdivision *> ghost_elems;
  std::vector<Node *> ghost_nodes;
  const unsigned int n_elem = mesh.n_elem();
  for (unsigned int eid = 0; eid < n_elem; ++eid)
    {
      Elem * elem = mesh.elem_ptr(eid);
      libmesh_assert_equal_to(elem->type(), TRI3SUBDIVISION);

      // If the triangle happens to be in a corner (two boundary
      // edges), we perform a counter-clockwise loop by mirroring the
      // previous triangle until we come back to the original
      // triangle.  This prevents degenerated triangles in the mesh
      // corners and guarantees that the node in the middle of the
      // loop is of valence=6.
      for (unsigned int i = 0; i < elem->n_sides(); ++i)
        {
          libmesh_assert_not_equal_to(elem->neighbor(i), elem);

          if (elem->neighbor(i) == libmesh_nullptr &&
              elem->neighbor(next[i]) == libmesh_nullptr)
            {
              Elem * nelem = elem;
              unsigned int k = i;
              for (unsigned int l = 0; l < 4; ++l)
                {
                  // this is the vertex to be mirrored
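                  // (geometrically: the reflection of point(prev[k]) across
                  // the midpoint of the boundary edge (k, next[k]))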
                  Point point = nelem->point(k) + nelem->point(next[k]) - nelem->point(prev[k]);

                  // Check if the proposed vertex doesn't coincide
                  // with one of the existing vertices.  This is
                  // necessary because for some triangulations, it can
                  // happen that two mirrored ghost vertices coincide,
                  // which would then lead to a zero size ghost
                  // element below.
                  Node * node = libmesh_nullptr;
                  for (unsigned int j = 0; j < ghost_nodes.size(); ++j)
                    {
                      if ((*ghost_nodes[j] - point).norm() < tol * (elem->point(k) - point).norm())
                        {
                          node = ghost_nodes[j];
                          break;
                        }
                    }

                  // add the new vertex only if no other is nearby
                  if (node == libmesh_nullptr)
                    {
                      node = mesh.add_point(point);
                      ghost_nodes.push_back(node);
                    }

                  Tri3Subdivision * newelem = new Tri3Subdivision();

                  // add the first new ghost element to the list just as in the non-corner case
                  if (l == 0)
                    ghost_elems.push_back(newelem);

                  newelem->set_node(0) = nelem->node_ptr(next[k]);
                  newelem->set_node(1) = nelem->node_ptr(k);
                  newelem->set_node(2) = node;
                  newelem->set_neighbor(0, nelem);
                  newelem->set_ghost(true);
                  if (l>0)
                    newelem->set_neighbor(2, libmesh_nullptr);
                  nelem->set_neighbor(k, newelem);

                  mesh.add_elem(newelem);
                  mesh.get_boundary_info().add_node(nelem->node_ptr(k), 1);
                  mesh.get_boundary_info().add_node(nelem->node_ptr(next[k]), 1);
                  mesh.get_boundary_info().add_node(nelem->node_ptr(prev[k]), 1);
                  mesh.get_boundary_info().add_node(node, 1);

                  nelem = newelem;
                  k = 2;
                }

              Tri3Subdivision * newelem = new Tri3Subdivision();

              newelem->set_node(0) = elem->node_ptr(next[i]);
              newelem->set_node(1) = nelem->node_ptr(2);
              newelem->set_node(2) = elem->node_ptr(prev[i]);
              newelem->set_neighbor(0, nelem);
              nelem->set_neighbor(2, newelem);
              newelem->set_ghost(true);
              newelem->set_neighbor(2, elem);
              elem->set_neighbor(next[i], newelem);

              mesh.add_elem(newelem);

              break;
            }
        }

      for (unsigned int i = 0; i < elem->n_sides(); ++i)
        {
          libmesh_assert_not_equal_to(elem->neighbor(i), elem);
          if (elem->neighbor(i) == libmesh_nullptr)
            {
              // this is the vertex to be mirrored
              Point point = elem->point(i) + elem->point(next[i]) - elem->point(prev[i]);

              // Check if the proposed vertex doesn't coincide with
              // one of the existing vertices.  This is necessary
              // because for some triangulations, it can happen that
              // two mirrored ghost vertices coincide, which would
              // then lead to a zero size ghost element below.
              Node * node = libmesh_nullptr;
              for (unsigned int j = 0; j < ghost_nodes.size(); ++j)
                {
                  if ((*ghost_nodes[j] - point).norm() < tol * (elem->point(i) - point).norm())
                    {
                      node = ghost_nodes[j];
                      break;
                    }
                }

              // add the new vertex only if no other is nearby
              if (node == libmesh_nullptr)
                {
                  node = mesh.add_point(point);
                  ghost_nodes.push_back(node);
                }

              Tri3Subdivision * newelem = new Tri3Subdivision();
              ghost_elems.push_back(newelem);

              newelem->set_node(0) = elem->node_ptr(next[i]);
              newelem->set_node(1) = elem->node_ptr(i);
              newelem->set_node(2) = node;
              newelem->set_neighbor(0, elem);
              newelem->set_ghost(true);
              elem->set_neighbor(i, newelem);

              mesh.add_elem(newelem);
              mesh.get_boundary_info().add_node(elem->node_ptr(i), 1);
              mesh.get_boundary_info().add_node(elem->node_ptr(next[i]), 1);
              mesh.get_boundary_info().add_node(elem->node_ptr(prev[i]), 1);
              mesh.get_boundary_info().add_node(node, 1);
            }
        }
    }

  // add the missing ghost elements (connecting new ghost nodes)
  std::vector<Tri3Subdivision *> missing_ghost_elems;
  std::vector<Tri3Subdivision *>::iterator       ghost_el     = ghost_elems.begin();
  const std::vector<Tri3Subdivision *>::iterator end_ghost_el = ghost_elems.end();
  for (; ghost_el != end_ghost_el; ++ghost_el)
    {
      Tri3Subdivision * elem = *ghost_el;
      libmesh_assert(elem->is_ghost());

      for (unsigned int i = 0; i < elem->n_sides(); ++i)
        {
          if (elem->neighbor(i) == libmesh_nullptr &&
              elem->neighbor(prev[i]) != libmesh_nullptr)
            {
              // go around counter-clockwise
              Tri3Subdivision * nb1 = static_cast<Tri3Subdivision *>(elem->neighbor(prev[i]));
              Tri3Subdivision * nb2 = nb1;
              unsigned int j = i;
              unsigned int n_nb = 0;
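              // Walk counter-clockwise around node i of this element, from
              // neighbor to neighbor, until we either come back to this
              // element or hit the boundary (a null neighbor).  nb2 remembers
              // the last element visited and n_nb counts the steps taken.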
              while (nb1 != libmesh_nullptr && nb1->id() != elem->id())
                {
                  j = nb1->local_node_number(elem->node_id(i));
                  nb2 = nb1;
                  nb1 = static_cast<Tri3Subdivision *>(nb1->neighbor(prev[j]));
                  libmesh_assert(nb1 == libmesh_nullptr || nb1->id() != nb2->id());
                  n_nb++;
                }

              libmesh_assert_not_equal_to(nb2->id(), elem->id());

              // Above, we merged coinciding ghost vertices. Therefore, we need
              // to exclude the case where there is no ghost element to add between
              // these two (identical) ghost nodes.
              if (elem->node_ptr(next[i])->id() == nb2->node_ptr(prev[j])->id())
                break;

              // If the number of already present neighbors is less than 4, we add another extra element
              // so that the node in the middle of the loop ends up being of valence=6.
              // This case usually happens when the middle node corresponds to a corner of the original mesh,
              // and the extra element below prevents degenerated triangles in the mesh corners.
              if (n_nb < 4)
                {
                  // this is the vertex to be mirrored
                  Point point = nb2->point(j) + nb2->point(prev[j]) - nb2->point(next[j]);

                  // Check if the proposed vertex doesn't coincide with one of the existing vertices.
                  // This is necessary because for some triangulations, it can happen that two mirrored
                  // ghost vertices coincide, which would then lead to a zero size ghost element below.
                  Node * node = libmesh_nullptr;
                  for (unsigned int k = 0; k < ghost_nodes.size(); ++k)
                    {
                      if ((*ghost_nodes[k] - point).norm() < tol * (nb2->point(j) - point).norm())
                        {
                          node = ghost_nodes[k];
                          break;
                        }
                    }

                  // add the new vertex only if no other is nearby
                  if (node == libmesh_nullptr)
                    {
                      node = mesh.add_point(point);
                      ghost_nodes.push_back(node);
                    }

                  Tri3Subdivision * newelem = new Tri3Subdivision();

                  newelem->set_node(0) = nb2->node_ptr(j);
                  newelem->set_node(1) = nb2->node_ptr(prev[j]);
                  newelem->set_node(2) = node;
                  newelem->set_neighbor(0, nb2);
                  newelem->set_neighbor(1, libmesh_nullptr);
                  newelem->set_ghost(true);
                  nb2->set_neighbor(prev[j], newelem);

                  mesh.add_elem(newelem);
                  mesh.get_boundary_info().add_node(nb2->node_ptr(j), 1);
                  mesh.get_boundary_info().add_node(nb2->node_ptr(prev[j]), 1);
                  mesh.get_boundary_info().add_node(node, 1);

                  nb2 = newelem;
                  j = nb2->local_node_number(elem->node_id(i));
                }

              Tri3Subdivision * newelem = new Tri3Subdivision();
              newelem->set_node(0) = elem->node_ptr(next[i]);
              newelem->set_node(1) = elem->node_ptr(i);
              newelem->set_node(2) = nb2->node_ptr(prev[j]);
              newelem->set_neighbor(0, elem);
              newelem->set_neighbor(1, nb2);
              newelem->set_neighbor(2, libmesh_nullptr);
              newelem->set_ghost(true);

              elem->set_neighbor(i, newelem);
              nb2->set_neighbor(prev[j], newelem);

              missing_ghost_elems.push_back(newelem);
              break;
            }
        } // end side loop
    } // end ghost element loop

  // add the missing ghost elements to the mesh
  std::vector<Tri3Subdivision *>::iterator       missing_el     = missing_ghost_elems.begin();
  const std::vector<Tri3Subdivision *>::iterator end_missing_el = missing_ghost_elems.end();
  for (; missing_el != end_missing_el; ++missing_el)
    mesh.add_elem(*missing_el);
}
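
A hedged usage sketch (assuming the converter MeshTools::Subdivision::all_subdivision() from the same support namespace is available): add_boundary_ghosts() expects a mesh that already consists of TRI3SUBDIVISION elements, so a caller would typically convert the mesh first and then append the mirrored ghost ring along the boundary.

void make_ghosted_subdivision_mesh (MeshBase & mesh)
{
  // Convert the existing Tri3 elements into Tri3Subdivision elements.
  MeshTools::Subdivision::all_subdivision (mesh);

  // Mirror the boundary elements so that boundary nodes obtain the
  // full one-ring neighborhood required by the subdivision scheme.
  MeshTools::Subdivision::add_boundary_ghosts (mesh);
}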