Example #1
//-----------------------------------------------------------------------------
void AssemblerBase::init_global_tensor(GenericTensor& A, const Form& a)
{
  dolfin_assert(a.ufc_form());

  // Get dof maps
  std::vector<const GenericDofMap*> dofmaps;
  for (std::size_t i = 0; i < a.rank(); ++i)
    dofmaps.push_back(a.function_space(i)->dofmap().get());

  if (A.size(0) == 0)
  {
    Timer t0("Build sparsity");

    // Create layout for initialising tensor
    std::shared_ptr<TensorLayout> tensor_layout;
    tensor_layout = A.factory().create_layout(a.rank());
    dolfin_assert(tensor_layout);

    // Get dimensions
    std::vector<std::size_t> global_dimensions;
    std::vector<std::pair<std::size_t, std::size_t> > local_range;
    std::vector<std::size_t> block_sizes;
    for (std::size_t i = 0; i < a.rank(); i++)
    {
      dolfin_assert(dofmaps[i]);
      global_dimensions.push_back(dofmaps[i]->global_dimension());
      local_range.push_back(dofmaps[i]->ownership_range());
      block_sizes.push_back(dofmaps[i]->block_size);
    }

    // Set block size for sparsity graphs
    std::size_t block_size = 1;
    if (a.rank() == 2)
    {
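      // Use the dofmap block size for the sparsity graph only when the
      // test and trial dofmaps agree on it; otherwise fall back to 1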
      const std::vector<std::size_t> _bs(a.rank(), dofmaps[0]->block_size);
      block_size = (block_sizes == _bs) ? dofmaps[0]->block_size : 1;
    }

    // Initialise tensor layout
    tensor_layout->init(a.mesh().mpi_comm(), global_dimensions, block_size,
                        local_range);

    // Build sparsity pattern if required
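    // (sparsity_pattern() may be null, e.g. when the backend does not use one)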
    if (tensor_layout->sparsity_pattern())
    {
      GenericSparsityPattern& pattern = *tensor_layout->sparsity_pattern();
      SparsityPatternBuilder::build(pattern,
                                    a.mesh(), dofmaps,
                                    a.ufc_form()->has_cell_integrals(),
                                    a.ufc_form()->has_interior_facet_integrals(),
                                    a.ufc_form()->has_exterior_facet_integrals(),
                                    keep_diagonal);
    }
    t0.stop();

    // Initialize tensor
    Timer t1("Init tensor");
    A.init(*tensor_layout);
    t1.stop();

    // Insert zeros on the diagonal as diagonal entries may be prematurely
    // optimised away by the linear algebra backend when calling
    // GenericMatrix::apply, e.g. PETSc does this then errors when matrices
    // have no diagonal entry inserted.
    if (A.rank() == 2 && keep_diagonal)
    {
      // Down cast to GenericMatrix
      GenericMatrix& _A = A.down_cast<GenericMatrix>();

      // Loop over rows and insert 0.0 on the diagonal
      const double block = 0.0;
      const std::pair<std::size_t, std::size_t> row_range = A.local_range(0);
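      // Only rows that actually have a diagonal entry; the local row range
      // may extend past the number of columns for rectangular tensors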
      const std::size_t range = std::min(row_range.second, A.size(1));
      for (std::size_t i = row_range.first; i < range; i++)
      {
        dolfin::la_index _i = i;
        _A.set(&block, 1, &_i, 1, &_i);
      }
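      // Flush the pending insertions; the final apply happens after assembly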
      A.apply("flush");
    }

    // Delete sparsity pattern
    Timer t2("Delete sparsity");
    t2.stop();
  }
  else
  {
    // If tensor is not reset, check that dimensions are correct
    for (std::size_t i = 0; i < a.rank(); ++i)
    {
      if (A.size(i) != dofmaps[i]->global_dimension())
      {
        dolfin_error("AssemblerBase.cpp",
                     "assemble form",
                     "Reset of tensor in assembly not requested, but dim %d of tensor does not match form", i);
      }
    }
  }

  if (!add_values)
    A.zero();
}
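
For context, a minimal sketch of how this initialisation is reached from user
code. The public flags keep_diagonal and add_values used above are members of
AssemblerBase, and Assembler::assemble calls init_global_tensor before
inserting cell contributions. The Poisson.h header, the generated form classes
and the shared-pointer constructor style below are assumptions for
illustration (an FFC-generated form and a DOLFIN version whose generated code
accepts shared pointers), not part of the listing above.

#include <dolfin.h>
#include "Poisson.h"  // assumed FFC-generated header (hypothetical for this sketch)

using namespace dolfin;

int main()
{
  // Assumed problem setup; any mesh/bilinear form pair would do
  auto mesh = std::make_shared<UnitSquareMesh>(32, 32);
  auto V = std::make_shared<Poisson::FunctionSpace>(mesh);
  Poisson::BilinearForm a(V, V);

  Matrix A;
  Assembler assembler;
  assembler.keep_diagonal = true;  // take the diagonal zero-insertion branch above
  assembler.add_values = false;    // so init_global_tensor zeroes A before assembly
  assembler.assemble(A, a);        // initialises A via init_global_tensor, then inserts cell tensors

  return 0;
}

If A has already been initialised, only the dimension check in the else-branch
runs, and A is still zeroed unless add_values is set.
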
Example #2
//-----------------------------------------------------------------------------
void AssemblerBase::init_global_tensor(GenericTensor& A, const Form& a)
{
    dolfin_assert(a.ufc_form());

    // Get dof maps
    std::vector<const GenericDofMap*> dofmaps;
    for (std::size_t i = 0; i < a.rank(); ++i)
        dofmaps.push_back(a.function_space(i)->dofmap().get());

    if (A.empty())
    {
        Timer t0("Build sparsity");

        // Create layout for initialising tensor
        std::shared_ptr<TensorLayout> tensor_layout;
        tensor_layout = A.factory().create_layout(a.rank());
        dolfin_assert(tensor_layout);

        // Get dimensions and mapping across processes for each dimension
        std::vector<std::shared_ptr<const IndexMap> > index_maps;
        for (std::size_t i = 0; i < a.rank(); i++)
        {
            dolfin_assert(dofmaps[i]);
            index_maps.push_back(dofmaps[i]->index_map());
        }

        // Initialise tensor layout
        // FIXME: somewhere need to check block sizes are same on both axes
        // NOTE: Jan: that will be done on the backend side; IndexMap will
        //            provide tabulate functions with arbitrary block size;
        //            moreover the functions will tabulate directly using a
        //            correct int type
        tensor_layout->init(a.mesh().mpi_comm(), index_maps,
                            TensorLayout::Ghosts::UNGHOSTED);

        // Build sparsity pattern if required
        if (tensor_layout->sparsity_pattern())
        {
            GenericSparsityPattern& pattern = *tensor_layout->sparsity_pattern();
            SparsityPatternBuilder::build(pattern,
                                          a.mesh(), dofmaps,
                                          a.ufc_form()->has_cell_integrals(),
                                          a.ufc_form()->has_interior_facet_integrals(),
                                          a.ufc_form()->has_exterior_facet_integrals(),
                                          a.ufc_form()->has_vertex_integrals(),
                                          keep_diagonal);
        }
        t0.stop();

        // Initialize tensor
        Timer t1("Init tensor");
        A.init(*tensor_layout);
        t1.stop();

        // Insert zeros on the diagonal as diagonal entries may be
        // prematurely optimised away by the linear algebra backend when
        // calling GenericMatrix::apply, e.g. PETSc does this then errors
        // when matrices have no diagonal entry inserted.
        if (A.rank() == 2 && keep_diagonal)
        {
            // Down cast to GenericMatrix
            GenericMatrix& _matA = A.down_cast<GenericMatrix>();

            // Loop over rows and insert 0.0 on the diagonal
            const double block = 0.0;
            const std::pair<std::size_t, std::size_t> row_range = A.local_range(0);
            const std::size_t range = std::min(row_range.second, A.size(1));
            for (std::size_t i = row_range.first; i < range; i++)
            {
                dolfin::la_index _i = i;
                _matA.set(&block, 1, &_i, 1, &_i);
            }
            A.apply("flush");
        }

        // Delete sparsity pattern
        Timer t2("Delete sparsity");
        t2.stop();
    }
    else
    {
        // If tensor is not reset, check that dimensions are correct
        for (std::size_t i = 0; i < a.rank(); ++i)
        {
            if (A.size(i) != dofmaps[i]->global_dimension())
            {
                dolfin_error("AssemblerBase.cpp",
                             "assemble form",
                             "Dim %d of tensor does not match form", i);
            }
        }
    }

    if (!add_values)
        A.zero();
}
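
Relative to Example #1, this version replaces the explicit global-dimension,
local-range and block-size vectors with a single IndexMap per tensor dimension,
tests for an uninitialised tensor with A.empty() rather than A.size(0) == 0,
passes the ghost mode (TensorLayout::Ghosts::UNGHOSTED) through the new
TensorLayout::init signature, and additionally passes has_vertex_integrals()
to the sparsity pattern builder.
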
Example #3
//-----------------------------------------------------------------------------
void MultiMeshAssembler::_init_global_tensor(GenericTensor& A,
                                             const MultiMeshForm& a)
{
  log(PROGRESS, "Initializing global tensor.");

  // This function initializes the big system matrix corresponding to
  // all dofs (including inactive dofs) on all parts of the MultiMesh
  // function space.

  // Create layout for initializing tensor
  std::shared_ptr<TensorLayout> tensor_layout;
  tensor_layout = A.factory().create_layout(a.rank());
  dolfin_assert(tensor_layout);

  // Get dimensions
  std::vector<std::shared_ptr<const IndexMap>> index_maps;
  for (std::size_t i = 0; i < a.rank(); i++)
  {
    std::shared_ptr<const MultiMeshFunctionSpace> V = a.function_space(i);
    dolfin_assert(V);

    // Create a fresh IndexMap covering all dofs of the MultiMesh space
    // (block size 1) rather than reusing a per-part dofmap's index map
    index_maps.push_back(std::shared_ptr<const IndexMap>
                         (new IndexMap(MPI_COMM_WORLD, V->dim(), 1)));
  }

  // Initialise tensor layout
  tensor_layout->init(MPI_COMM_WORLD, index_maps,
                      TensorLayout::Ghosts::UNGHOSTED);

  // Build sparsity pattern if required
  if (tensor_layout->sparsity_pattern())
  {
    GenericSparsityPattern& pattern = *tensor_layout->sparsity_pattern();
    SparsityPatternBuilder::build_multimesh_sparsity_pattern(pattern, a);
  }

  // Initialize tensor
  A.init(*tensor_layout);

  // Insert zeros on the diagonal as diagonal entries may be prematurely
  // optimised away by the linear algebra backend when calling
  // GenericMatrix::apply, e.g. PETSc does this then errors when matrices
  // have no diagonal entry inserted.
  if (A.rank() == 2)
  {
    // Down cast to GenericMatrix
    GenericMatrix& _matA = A.down_cast<GenericMatrix>();

    // Loop over rows and insert 0.0 on the diagonal
    const double block = 0.0;
    const std::pair<std::size_t, std::size_t> row_range = A.local_range(0);
    const std::size_t range = std::min(row_range.second, A.size(1));
    for (std::size_t i = row_range.first; i < range; i++)
    {
      dolfin::la_index _i = i;
      _matA.set(&block, 1, &_i, 1, &_i);
    }
    A.apply("flush");
  }

  // Set tensor to zero
  A.zero();
}
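
Unlike the AssemblerBase variants above, this MultiMesh version builds a fresh
IndexMap of size V->dim() with block size 1 for each dimension instead of
reusing the dofmaps' index maps, hard-wires the communicator to MPI_COMM_WORLD,
always inserts the diagonal zeros (there is no keep_diagonal flag here), and
unconditionally zeroes the tensor at the end.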