Example #1
Entry point of the OpenMP assembler: it refuses to run under MPI, then dispatches to cell, exterior-facet and interior-facet assembly and finalizes the global tensor.
//----------------------------------------------------------------------------
void OpenMpAssembler::assemble(GenericTensor& A, const Form& a)
{
  // Get mesh
  const Mesh& mesh = a.mesh();

  if (MPI::size(mesh.mpi_comm()) > 1)
  {
    dolfin_error("OpenMPAssembler.cpp",
                 "perform multithreaded assembly using OpenMP assembler",
                 "The OpenMp assembler has not been tested in combination with MPI");
  }

  dolfin_assert(a.ufc_form());

  // All assembler functions above end up calling this function, which
  // in turn calls the assembler functions below to assemble over
  // cells, exterior and interior facets. Note the importance of
  // treating empty mesh functions as null pointers for the PyDOLFIN
  // interface.

  // Get cell domains
  std::shared_ptr<const MeshFunction<std::size_t>> cell_domains
    = a.cell_domains();

  // Get exterior facet domains
  std::shared_ptr<const MeshFunction<std::size_t>> exterior_facet_domains
    = a.exterior_facet_domains();

  // Get interior facet domains
  std::shared_ptr<const MeshFunction<std::size_t>> interior_facet_domains
    = a.interior_facet_domains();

  // Check form
  AssemblerBase::check(a);

  // Create data structure for local assembly data
  UFC ufc(a);

  // Initialize global tensor
  init_global_tensor(A, a);

  // FIXME: The dispatch logic below should be made robust
  if (a.ufc_form()->has_interior_facet_integrals())
    assemble_interior_facets(A, a, ufc, interior_facet_domains, cell_domains, 0);

  if (a.ufc_form()->has_exterior_facet_integrals())
  {
    assemble_cells_and_exterior_facets(A, a, ufc, cell_domains,
                                       exterior_facet_domains, 0);
  }
  else
    assemble_cells(A, a, ufc, cell_domains, 0);

  // Finalize assembly of global tensor
  if (finalize_tensor)
    A.apply("add");
}
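
For orientation, a minimal usage sketch follows. It assumes the classic DOLFIN C++ API (dolfin.h) and a UFL-generated form class Poisson; the mesh, function space and form are illustrative, not taken from the snippet above.

// Hedged sketch: Poisson.h is a hypothetical UFL-generated form header.
#include <dolfin.h>
#include "Poisson.h"

using namespace dolfin;

int main()
{
  auto mesh = std::make_shared<UnitSquareMesh>(32, 32);
  auto V = std::make_shared<Poisson::FunctionSpace>(mesh);
  Poisson::BilinearForm a(V, V);

  Matrix A;
  OpenMpAssembler assembler;
  assembler.assemble(A, a);  // raises dolfin_error when MPI size > 1

  return 0;
}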
Example #2
The general-purpose assembler entry point: it optionally hands off to the OpenMP assembler, then assembles over cells, exterior facets, interior facets and vertices.
//----------------------------------------------------------------------------
void Assembler::assemble(GenericTensor& A, const Form& a)
{
  // All assembler functions above end up calling this function, which
  // in turn calls the assembler functions below to assemble over
  // cells, exterior and interior facets.

  // Check whether we should call the multi-core assembler
  #ifdef HAS_OPENMP
  const std::size_t num_threads = parameters["num_threads"];
  if (num_threads > 0)
  {
    OpenMpAssembler assembler;
    assembler.add_values = add_values;
    assembler.finalize_tensor = finalize_tensor;
    assembler.keep_diagonal = keep_diagonal;
    assembler.assemble(A, a);
    return;
  }
  #endif
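  // (A positive num_threads value reroutes assembly through the OpenMP
  // assembler from Example #1, copying across the same option flags.)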

  // Get cell domains
  std::shared_ptr<const MeshFunction<std::size_t>> cell_domains
    = a.cell_domains();

  // Get exterior facet domains
  std::shared_ptr<const MeshFunction<std::size_t>> exterior_facet_domains
    = a.exterior_facet_domains();

  // Get interior facet domains
  std::shared_ptr<const MeshFunction<std::size_t>> interior_facet_domains
    = a.interior_facet_domains();

  // Get vertex domains
  std::shared_ptr<const MeshFunction<std::size_t>> vertex_domains
    = a.vertex_domains();

  // Check form
  AssemblerBase::check(a);

  // Create data structure for local assembly data
  UFC ufc(a);

  // Get form coefficients
  const std::vector<std::shared_ptr<const GenericFunction>>
    coefficients = a.coefficients();

  // Initialize global tensor
  init_global_tensor(A, a);

  // Assemble over cells
  assemble_cells(A, a, ufc, cell_domains, NULL);

  // Assemble over exterior facets
  assemble_exterior_facets(A, a, ufc, exterior_facet_domains, NULL);

  // Assemble over interior facets
  assemble_interior_facets(A, a, ufc, interior_facet_domains,
                           cell_domains, NULL);

  // Assemble over vertices
  assemble_vertices(A, a, ufc, vertex_domains);

  // Finalize assembly of global tensor
  if (finalize_tensor)
    A.apply("add");
}
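
A matching sketch for this general-purpose path, under the same assumptions as the sketch after Example #1 (dolfin.h plus an illustrative generated Poisson form). The add_values, finalize_tensor and keep_diagonal fields copied to the OpenMP assembler above are public options and can be set the same way here:

// Hedged sketch: Poisson.h is a hypothetical UFL-generated form header.
#include <dolfin.h>
#include "Poisson.h"

using namespace dolfin;

int main()
{
  auto mesh = std::make_shared<UnitSquareMesh>(32, 32);
  auto V = std::make_shared<Poisson::FunctionSpace>(mesh);
  Poisson::BilinearForm a(V, V);

  Matrix A;
  Assembler assembler;
  assembler.keep_diagonal = true;  // keep diagonal entries in the sparsity
                                   // pattern (convenient for boundary conditions)
  assembler.assemble(A, a);        // ends with A.apply("add") unless
                                   // finalize_tensor is disabled

  return 0;
}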