Example #1
void integrate_function (const MeshBase &mesh)
{
#if defined(LIBMESH_HAVE_TRIANGLE) && defined(LIBMESH_HAVE_TETGEN)
    MeshBase::const_element_iterator       el     = mesh.active_local_elements_begin();
    const MeshBase::const_element_iterator end_el = mesh.active_local_elements_end();

    std::vector<Real> vertex_distance;

    QComposite<QGauss> qrule (mesh.mesh_dimension(), FIRST);
    //QGauss qrule (mesh.mesh_dimension(), FIRST);

    UniquePtr<FEBase> fe (FEBase::build (mesh.mesh_dimension(), FEType (FIRST, LAGRANGE)));

    Real int_val=0.;

    const std::vector<Point> &q_points = fe->get_xyz();
    const std::vector<Real>  &JxW      = fe->get_JxW();

    for (; el!=end_el; ++el)
    {
        const Elem *elem = *el;

        vertex_distance.clear();

        for (unsigned int v=0; v<elem->n_vertices(); v++)
            vertex_distance.push_back (distance(elem->point(v)));

        qrule.init (*elem, vertex_distance);
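        // For an element cut by the interface (mixed signs in
        // vertex_distance) the composite rule subdivides the element and
        // stacks the sub-element quadrature points; those custom points
        // and weights are handed directly to the FE object below.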

        fe->reinit (elem,
                    &(qrule.get_points()),
                    &(qrule.get_weights()));


        // TODO:  would it be valuable to have the composite quadrature rule sort
        // from smallest to largest JxW value to help prevent
        // ... large + small + large + large + small ...
        // type truncation errors?
        for (unsigned int qp=0; qp<q_points.size(); qp++)
            int_val += JxW[qp] * integrand(q_points[qp]);
    }

    mesh.comm().sum (int_val);

    std::cout  << "\n***********************************\n"
               << " int_val   = " << int_val << std::endl
               << " exact_val = " <<  1*(2*2 - radius*radius*pi) + 10.*(radius*radius*pi)
               << "\n***********************************\n"
               << std::endl;
#else
    libmesh_ignore(mesh);
#endif
}
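
The helpers distance() and integrand() used above are defined elsewhere in the original example. Judging from the exact_val expression printed at the end, the integrand is 10 inside a circle of radius `radius` and 1 outside, over a 2x2 domain; a minimal sketch consistent with that (an assumption, not the original definitions):

#include <cmath>

Real distance (const Point & p)
{
  // Hypothetical signed distance to a circle of radius `radius`
  // centered at the origin: negative inside, positive outside.
  return std::sqrt(p(0)*p(0) + p(1)*p(1)) - radius;
}

Real integrand (const Point & p)
{
  // 10 inside the circle, 1 outside, matching the exact_val formula.
  return (distance(p) < 0.) ? 10. : 1.;
}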
Example #2
void ParmetisPartitioner::build_graph (const MeshBase & mesh)
{
  // build the graph in distributed CSR format.  Note that
  // the edges in the graph will correspond to
  // face neighbors
  const dof_id_type n_active_local_elem  = mesh.n_active_local_elem();

  // If we have boundary elements in this mesh, we want to account for
  // the connectivity between them and interior elements.  We can find
  // interior elements from boundary elements, but we need to build up
  // a lookup map to do the reverse.

  typedef LIBMESH_BEST_UNORDERED_MULTIMAP<const Elem *, const Elem *>
    map_type;
  map_type interior_to_boundary_map;

  {
    MeshBase::const_element_iterator       elem_it  = mesh.active_elements_begin();
    const MeshBase::const_element_iterator elem_end = mesh.active_elements_end();

    for (; elem_it != elem_end; ++elem_it)
      {
        const Elem * elem = *elem_it;

        // If we don't have an interior_parent, there's no interior
        // element to connect us to, so skip this element.
        if ((elem->dim() >= LIBMESH_DIM) ||
            !elem->interior_parent())
          continue;

        // get all relevant interior elements
        std::set<const Elem *> neighbor_set;
        elem->find_interior_neighbors(neighbor_set);

        std::set<const Elem *>::iterator n_it = neighbor_set.begin();
        for (; n_it != neighbor_set.end(); ++n_it)
          {
            // FIXME - non-const versions of the Elem set methods
            // would be nice
            Elem * neighbor = const_cast<Elem *>(*n_it);

#if defined(LIBMESH_HAVE_UNORDERED_MULTIMAP) || \
  defined(LIBMESH_HAVE_TR1_UNORDERED_MAP) ||    \
  defined(LIBMESH_HAVE_HASH_MAP) ||             \
  defined(LIBMESH_HAVE_EXT_HASH_MAP)
            interior_to_boundary_map.insert
              (std::make_pair(neighbor, elem));
#else
            interior_to_boundary_map.insert
              (interior_to_boundary_map.begin(),
               std::make_pair(neighbor, elem));
#endif
          }
      }
  }
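
  // For example, if boundary element b lies on a face of interior
  // element i, the map now holds (i -> b); the equal_range() lookup
  // further below recovers b when we visit i.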

#ifdef LIBMESH_ENABLE_AMR
  std::vector<const Elem *> neighbors_offspring;
#endif

  std::vector<std::vector<dof_id_type> > graph(n_active_local_elem);
  dof_id_type graph_size=0;

  const dof_id_type first_local_elem = _pmetis->vtxdist[mesh.processor_id()];

  MeshBase::const_element_iterator       elem_it  = mesh.active_local_elements_begin();
  const MeshBase::const_element_iterator elem_end = mesh.active_local_elements_end();

  for (; elem_it != elem_end; ++elem_it)
    {
      const Elem * elem = *elem_it;

      libmesh_assert (_global_index_by_pid_map.count(elem->id()));
      const dof_id_type global_index_by_pid =
        _global_index_by_pid_map[elem->id()];

      const dof_id_type local_index =
        global_index_by_pid - first_local_elem;
      libmesh_assert_less (local_index, n_active_local_elem);

      std::vector<dof_id_type> & graph_row = graph[local_index];

      // Loop over the element's neighbors.  An element
      // adjacency corresponds to a face neighbor
      for (unsigned int ms=0; ms<elem->n_neighbors(); ms++)
        {
          const Elem * neighbor = elem->neighbor(ms);

          if (neighbor != libmesh_nullptr)
            {
              // If the neighbor is active treat it
              // as a connection
              if (neighbor->active())
                {
                  libmesh_assert(_global_index_by_pid_map.count(neighbor->id()));
                  const dof_id_type neighbor_global_index_by_pid =
                    _global_index_by_pid_map[neighbor->id()];

                  graph_row.push_back(neighbor_global_index_by_pid);
                  graph_size++;
                }

#ifdef LIBMESH_ENABLE_AMR

              // Otherwise we need to find all of the
              // neighbor's children that are connected to
              // us and add them
              else
                {
                  // The side of the neighbor to which
                  // we are connected
                  const unsigned int ns =
                    neighbor->which_neighbor_am_i (elem);
                  libmesh_assert_less (ns, neighbor->n_neighbors());

                  // Get all the active children (& grandchildren, etc...)
                  // of the neighbor

                  // FIXME - this is the wrong thing, since we
                  // should be getting the active family tree on
                  // our side only.  But adding too many graph
                  // links may cause hanging nodes to tend to be
                  // on partition interiors, which would reduce
                  // communication overhead for constraint
                  // equations, so we'll leave it.

                  neighbor->active_family_tree (neighbors_offspring);

                  // Get all the neighbor's children that
                  // live on that side and are thus connected
                  // to us
                  for (unsigned int nc=0; nc<neighbors_offspring.size(); nc++)
                    {
                      const Elem * child =
                        neighbors_offspring[nc];

                      // This does not assume a level-1 mesh.
                      // Note that since children have sides numbered
                      // coincident with the parent, this is a sufficient test.
                      if (child->neighbor(ns) == elem)
                        {
                          libmesh_assert (child->active());
                          libmesh_assert (_global_index_by_pid_map.count(child->id()));
                          const dof_id_type child_global_index_by_pid =
                            _global_index_by_pid_map[child->id()];

                          graph_row.push_back(child_global_index_by_pid);
                          graph_size++;
                        }
                    }
                }

#endif /* ifdef LIBMESH_ENABLE_AMR */


            }
        }

      if ((elem->dim() < LIBMESH_DIM) &&
          elem->interior_parent())
        {
          // get all relevant interior elements
          std::set<const Elem *> neighbor_set;
          elem->find_interior_neighbors(neighbor_set);

          std::set<const Elem *>::iterator n_it = neighbor_set.begin();
          for (; n_it != neighbor_set.end(); ++n_it)
            {
              // FIXME - non-const versions of the Elem set methods
              // would be nice
              Elem * neighbor = const_cast<Elem *>(*n_it);

              const dof_id_type neighbor_global_index_by_pid =
                _global_index_by_pid_map[neighbor->id()];

              graph_row.push_back(neighbor_global_index_by_pid);
              graph_size++;
            }
        }

      // Check for any boundary neighbors
      typedef map_type::iterator map_it_type;
      std::pair<map_it_type, map_it_type>
        bounds = interior_to_boundary_map.equal_range(elem);

      for (map_it_type it = bounds.first; it != bounds.second; ++it)
        {
          const Elem * neighbor = it->second;

          const dof_id_type neighbor_global_index_by_pid =
            _global_index_by_pid_map[neighbor->id()];

          graph_row.push_back(neighbor_global_index_by_pid);
          graph_size++;
        }
    }

  // Reserve space in the adjacency array
  _pmetis->xadj.clear();
  _pmetis->xadj.reserve (n_active_local_elem + 1);
  _pmetis->adjncy.clear();
  _pmetis->adjncy.reserve (graph_size);

  for (std::size_t r=0; r<graph.size(); r++)
    {
      _pmetis->xadj.push_back(_pmetis->adjncy.size());
      std::vector<dof_id_type> graph_row; // build this empty
      graph_row.swap(graph[r]); // this will deallocate at the end of scope
      _pmetis->adjncy.insert(_pmetis->adjncy.end(),
                             graph_row.begin(),
                             graph_row.end());
    }

  // The end of the adjacency array for the last elem
  _pmetis->xadj.push_back(_pmetis->adjncy.size());

  libmesh_assert_equal_to (_pmetis->xadj.size(), n_active_local_elem+1);
  libmesh_assert_equal_to (_pmetis->adjncy.size(), graph_size);
}
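
For reference, a standalone sketch of the CSR layout built above, with illustrative data only (plain ints here rather than the _pmetis idx_t arrays):

#include <cstddef>
#include <cstdio>
#include <vector>

int main ()
{
  // Three local elements: 0 neighbors {1,2}, 1 neighbors {0}, 2 neighbors {0}.
  // Row i of the adjacency is adjncy[xadj[i]] .. adjncy[xadj[i+1]-1].
  std::vector<int> xadj   = {0, 2, 3, 4};
  std::vector<int> adjncy = {1, 2, 0, 0};

  for (std::size_t i=0; i+1<xadj.size(); i++)
    for (int j=xadj[i]; j<xadj[i+1]; j++)
      std::printf("element %zu -> neighbor %d\n", i, adjncy[j]);

  return 0;
}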
Example #3
void ParmetisPartitioner::initialize (const MeshBase & mesh,
                                      const unsigned int n_sbdmns)
{
  const dof_id_type n_active_local_elem = mesh.n_active_local_elem();

  // Set parameters.
  _pmetis->wgtflag = 2;                                      // weights on vertices only
  _pmetis->ncon    = 1;                                      // one weight per vertex
  _pmetis->numflag = 0;                                      // C-style 0-based numbering
  _pmetis->nparts  = static_cast<Parmetis::idx_t>(n_sbdmns); // number of subdomains to create
  _pmetis->edgecut = 0;                                      // the numbers of edges cut by the
                                                             // partition

  // Initialize data structures for ParMETIS
  _pmetis->vtxdist.resize (mesh.n_processors()+1); std::fill (_pmetis->vtxdist.begin(), _pmetis->vtxdist.end(), 0);
  _pmetis->tpwgts.resize  (_pmetis->nparts);       std::fill (_pmetis->tpwgts.begin(),  _pmetis->tpwgts.end(),  1./_pmetis->nparts);
  _pmetis->ubvec.resize   (_pmetis->ncon);         std::fill (_pmetis->ubvec.begin(),   _pmetis->ubvec.end(),   1.05);
  _pmetis->part.resize    (n_active_local_elem);   std::fill (_pmetis->part.begin(),    _pmetis->part.end(), 0);
  _pmetis->options.resize (5);
  _pmetis->vwgt.resize    (n_active_local_elem);

  // Set the options
  _pmetis->options[0] = 1;  // don't use default options
  _pmetis->options[1] = 0;  // default (level of timing)
  _pmetis->options[2] = 15; // random seed (default)
  _pmetis->options[3] = 2;  // processor distribution and subdomain distribution are decoupled

  // Find the number of active elements on each processor.  We cannot use
  // mesh.n_active_elem_on_proc(pid) since that only returns the number of
  // elements assigned to pid which are currently stored on the calling
  // processor. This will not in general be correct for parallel meshes
  // when (pid!=mesh.processor_id()).
  _n_active_elem_on_proc.resize(mesh.n_processors());
  mesh.comm().allgather(n_active_local_elem, _n_active_elem_on_proc);

  // count the total number of active elements in the mesh.  Note we cannot
  // use mesh.n_active_elem() in general since this only returns the number
  // of active elements which are stored on the calling processor.
  // We should not use n_active_elem for any allocation because that will
  // be inherently unscalable, but it can be useful for libmesh_assertions.
  dof_id_type n_active_elem=0;

  // Set up the vtxdist array.  This will be the same on each processor.
  // ***** Consult the Parmetis documentation. *****
  libmesh_assert_equal_to (_pmetis->vtxdist.size(),
                           cast_int<std::size_t>(mesh.n_processors()+1));
  libmesh_assert_equal_to (_pmetis->vtxdist[0], 0);

  for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
    {
      _pmetis->vtxdist[pid+1] = _pmetis->vtxdist[pid] + _n_active_elem_on_proc[pid];
      n_active_elem += _n_active_elem_on_proc[pid];
    }
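
  // For example, with three processors owning 4, 5, and 3 active
  // elements, vtxdist = [0, 4, 9, 12]: processor p owns the contiguous
  // global index range [vtxdist[p], vtxdist[p+1]).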
  libmesh_assert_equal_to (_pmetis->vtxdist.back(), static_cast<Parmetis::idx_t>(n_active_elem));

  // ParMetis expects the elements to be numbered in contiguous blocks
  // by processor, i.e. [0, ne0), [ne0, ne0+ne1), ...
  // Since we only partition active elements we should have no expectation
  // that we currently have such a distribution.  So we need to create it.
  // Also, at the same time we are going to map all the active elements into a globally
  // unique range [0,n_active_elem) which is *independent* of the current partitioning.
  // This can be fed to ParMetis as the initial partitioning of the subdomains (decoupled
  // from the partitioning of the objects themselves).  This allows us to get the same
  // resulting partitioning independent of the input partitioning.
  MeshTools::BoundingBox bbox =
    MeshTools::bounding_box(mesh);

  _global_index_by_pid_map.clear();

  // Maps active element ids into a contiguous range independent of partitioning.
  // (only needs local scope)
  vectormap<dof_id_type, dof_id_type> global_index_map;

  {
    std::vector<dof_id_type> global_index;

    // create the mapping which is contiguous by processor
    dof_id_type pid_offset=0;
    for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
      {
        MeshBase::const_element_iterator       it  = mesh.active_pid_elements_begin(pid);
        const MeshBase::const_element_iterator end = mesh.active_pid_elements_end(pid);

        // note that we may not have all (or any!) the active elements which belong on this processor,
        // but by calling this on all processors a unique range in [0,_n_active_elem_on_proc[pid])
        // is constructed.  Only the indices for the elements we pass in are returned in the array.
        MeshCommunication().find_global_indices (mesh.comm(),
                                                 bbox, it, end,
                                                 global_index);

        for (dof_id_type cnt=0; it != end; ++it)
          {
            const Elem * elem = *it;
            libmesh_assert (!_global_index_by_pid_map.count(elem->id()));
            libmesh_assert_less (cnt, global_index.size());
            libmesh_assert_less (global_index[cnt], _n_active_elem_on_proc[pid]);

            _global_index_by_pid_map.insert(std::make_pair(elem->id(), global_index[cnt++] + pid_offset));
          }

        pid_offset += _n_active_elem_on_proc[pid];
      }

    // create the unique mapping for all active elements independent of partitioning
    {
      MeshBase::const_element_iterator       it  = mesh.active_elements_begin();
      const MeshBase::const_element_iterator end = mesh.active_elements_end();

      // By calling this on all processors, a unique range in [0,n_active_elem) is constructed.
      // Only the indices for the elements we pass in are returned in the array.
      MeshCommunication().find_global_indices (mesh.comm(),
                                               bbox, it, end,
                                               global_index);

      for (dof_id_type cnt=0; it != end; ++it)
        {
          const Elem * elem = *it;
          libmesh_assert (!global_index_map.count(elem->id()));
          libmesh_assert_less (cnt, global_index.size());
          libmesh_assert_less (global_index[cnt], n_active_elem);

          global_index_map.insert(std::make_pair(elem->id(), global_index[cnt++]));
        }
    }
    // sanity checks: neither map can exceed the total number of active elements
    libmesh_assert_less_equal (global_index_map.size(), n_active_elem);
    libmesh_assert_less_equal (_global_index_by_pid_map.size(), n_active_elem);

    // At this point the two maps should be the same size.  If they are not
    // then the number of active elements is not the same as the sum over all
    // processors of the number of active elements per processor, which means
    // there must be some unpartitioned objects out there.
    if (global_index_map.size() != _global_index_by_pid_map.size())
      libmesh_error_msg("ERROR:  ParmetisPartitioner cannot handle unpartitioned objects!");
  }

  // Finally, we need to initialize the vertex (partition) weights and the initial subdomain
  // mapping.  The subdomain mapping will be independent of the processor mapping, and is
  // defined by a simple mapping of the global indices we just found.
  {
    std::vector<dof_id_type> subdomain_bounds(mesh.n_processors());

    const dof_id_type first_local_elem = _pmetis->vtxdist[mesh.processor_id()];

    for (processor_id_type pid=0; pid<mesh.n_processors(); pid++)
      {
        dof_id_type tgt_subdomain_size = 0;

        // watch out for the case that n_subdomains < n_processors
        if (pid < static_cast<unsigned int>(_pmetis->nparts))
          {
            tgt_subdomain_size = n_active_elem/std::min
              (cast_int<Parmetis::idx_t>(mesh.n_processors()), _pmetis->nparts);

            if (pid < n_active_elem%_pmetis->nparts)
              tgt_subdomain_size++;
          }
        if (pid == 0)
          subdomain_bounds[0] = tgt_subdomain_size;
        else
          subdomain_bounds[pid] = subdomain_bounds[pid-1] + tgt_subdomain_size;
      }
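
    // For example, n_active_elem = 10 and nparts = 3 gives target sizes
    // 4, 3, 3, hence subdomain_bounds = {4, 7, 10}; the std::lower_bound
    // search below then maps global index 5 to subdomain 1.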

    libmesh_assert_equal_to (subdomain_bounds.back(), n_active_elem);

    MeshBase::const_element_iterator       elem_it  = mesh.active_local_elements_begin();
    const MeshBase::const_element_iterator elem_end = mesh.active_local_elements_end();

    for (; elem_it != elem_end; ++elem_it)
      {
        const Elem * elem = *elem_it;

        libmesh_assert (_global_index_by_pid_map.count(elem->id()));
        const dof_id_type global_index_by_pid =
          _global_index_by_pid_map[elem->id()];
        libmesh_assert_less (global_index_by_pid, n_active_elem);

        const dof_id_type local_index =
          global_index_by_pid - first_local_elem;

        libmesh_assert_less (local_index, n_active_local_elem);
        libmesh_assert_less (local_index, _pmetis->vwgt.size());

        // TODO:[BSK] maybe there is a better weight?
        _pmetis->vwgt[local_index] = elem->n_nodes();

        // find the subdomain this element belongs in
        libmesh_assert (global_index_map.count(elem->id()));
        const dof_id_type global_index =
          global_index_map[elem->id()];

        libmesh_assert_less (global_index, subdomain_bounds.back());

        const unsigned int subdomain_id =
          std::distance(subdomain_bounds.begin(),
                        std::lower_bound(subdomain_bounds.begin(),
                                         subdomain_bounds.end(),
                                         global_index));
        libmesh_assert_less (subdomain_id, static_cast<unsigned int>(_pmetis->nparts));
        libmesh_assert_less (local_index, _pmetis->part.size());

        _pmetis->part[local_index] = subdomain_id;
      }
  }
}
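
Once initialize() and build_graph() have populated these structures, the partitioning itself (performed elsewhere in this class) hands them to ParMETIS. A sketch of that call, assuming the standard ParMETIS_V3_PartKway signature; this is illustrative, not the exact libMesh call site:

// Sketch only: error handling and empty-array corner cases omitted.
MPI_Comm comm = mesh.comm().get();
Parmetis::ParMETIS_V3_PartKway (&_pmetis->vtxdist[0], &_pmetis->xadj[0],
                                &_pmetis->adjncy[0], &_pmetis->vwgt[0],
                                libmesh_nullptr, // no edge weights
                                &_pmetis->wgtflag, &_pmetis->numflag,
                                &_pmetis->ncon, &_pmetis->nparts,
                                &_pmetis->tpwgts[0], &_pmetis->ubvec[0],
                                &_pmetis->options[0], &_pmetis->edgecut,
                                &_pmetis->part[0], &comm);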
void ParmetisPartitioner::build_graph (const MeshBase& mesh)
{
  // build the graph in distributed CSR format.  Note that
  // the edges in the graph will correspond to
  // face neighbors
  const unsigned int n_active_local_elem  = mesh.n_active_local_elem();

  std::vector<const Elem*> neighbors_offspring;

  std::vector<std::vector<unsigned int> > graph(n_active_local_elem);
  unsigned int graph_size=0;

  const unsigned int first_local_elem = _vtxdist[libMesh::processor_id()];

  MeshBase::const_element_iterator       elem_it  = mesh.active_local_elements_begin();
  const MeshBase::const_element_iterator elem_end = mesh.active_local_elements_end();

  for (; elem_it != elem_end; ++elem_it)
    {
      const Elem* elem = *elem_it;

      libmesh_assert (_global_index_by_pid_map.count(elem->id()));
      const unsigned int global_index_by_pid =
	_global_index_by_pid_map[elem->id()];

      const unsigned int local_index =
	global_index_by_pid - first_local_elem;
      libmesh_assert_less (local_index, n_active_local_elem);

      std::vector<unsigned int> &graph_row = graph[local_index];

      // Loop over the element's neighbors.  An element
      // adjacency corresponds to a face neighbor
      for (unsigned int ms=0; ms<elem->n_neighbors(); ms++)
	{
	  const Elem* neighbor = elem->neighbor(ms);

	  if (neighbor != NULL)
	    {
	      // If the neighbor is active treat it
	      // as a connection
	      if (neighbor->active())
		{
		  libmesh_assert(_global_index_by_pid_map.count(neighbor->id()));
		  const unsigned int neighbor_global_index_by_pid =
		    _global_index_by_pid_map[neighbor->id()];

		  graph_row.push_back(neighbor_global_index_by_pid);
		  graph_size++;
		}

#ifdef LIBMESH_ENABLE_AMR

	      // Otherwise we need to find all of the
	      // neighbor's children that are connected to
	      // us and add them
	      else
		{
		  // The side of the neighbor to which
		  // we are connected
		  const unsigned int ns =
		    neighbor->which_neighbor_am_i (elem);
                  libmesh_assert_less (ns, neighbor->n_neighbors());

		  // Get all the active children (& grandchildren, etc...)
		  // of the neighbor.
		  neighbor->active_family_tree (neighbors_offspring);

		  // Get all the neighbor's children that
		  // live on that side and are thus connected
		  // to us
		  for (unsigned int nc=0; nc<neighbors_offspring.size(); nc++)
		    {
		      const Elem* child =
			neighbors_offspring[nc];

		      // This does not assume a level-1 mesh.
		      // Note that since children have sides numbered
		      // coincident with the parent, this is a sufficient test.
		      if (child->neighbor(ns) == elem)
			{
			  libmesh_assert (child->active());
			  libmesh_assert (_global_index_by_pid_map.count(child->id()));
			  const unsigned int child_global_index_by_pid =
			    _global_index_by_pid_map[child->id()];

			  graph_row.push_back(child_global_index_by_pid);
			  graph_size++;
			}
		    }
		}

#endif /* ifdef LIBMESH_ENABLE_AMR */


	    }
	}
    }

  // Reserve space in the adjacency array
  _xadj.clear();
  _xadj.reserve (n_active_local_elem + 1);
  _adjncy.clear();
  _adjncy.reserve (graph_size);

  for (unsigned int r=0; r<graph.size(); r++)
    {
      _xadj.push_back(_adjncy.size());
      std::vector<unsigned int> graph_row; // build this empty
      graph_row.swap(graph[r]); // this will deallocate at the end of scope
      _adjncy.insert(_adjncy.end(),
		     graph_row.begin(),
		     graph_row.end());
    }

  // The end of the adjacency array for the last elem
  _xadj.push_back(_adjncy.size());

  libmesh_assert_equal_to (_xadj.size(), n_active_local_elem+1);
  libmesh_assert_equal_to (_adjncy.size(), graph_size);
}