//-----------------------------------------------------------------------------
void AssemblerBase::check(const Form& a)
{
  dolfin_assert(a.ufc_form());

  // Check the form
  a.check();

  // Extract mesh and coefficients
  const Mesh& mesh = a.mesh();
  const std::vector<std::shared_ptr<const GenericFunction>>
    coefficients = a.coefficients();

  // Check that we get the correct number of coefficients
  if (coefficients.size() != a.num_coefficients())
  {
    dolfin_error("AssemblerBase.cpp",
                 "assemble form",
                 "Incorrect number of coefficients (got %d but expecting %d)",
                 coefficients.size(), a.num_coefficients());
  }

  // Check that all coefficients have valid value dimensions
  for (std::size_t i = 0; i < coefficients.size(); ++i)
  {
    if (!coefficients[i])
    {
      dolfin_error("AssemblerBase.cpp",
                   "assemble form",
                   "Coefficient number %d (\"%s\") has not been set",
                   i, a.coefficient_name(i).c_str());
    }

    // unique_ptr deletes its object when it exits its scope
    std::unique_ptr<ufc::finite_element>
      fe(a.ufc_form()->create_finite_element(i + a.rank()));

    // Check value rank and dimensions against the generated element.
    // Note that these checks are only reliable for Functions, not
    // Expressions.
    const std::size_t r = coefficients[i]->value_rank();
    const std::size_t fe_r = fe->value_rank();
    if (fe_r != r)
    {
      dolfin_error("AssemblerBase.cpp",
                   "assemble form",
                   "Invalid value rank for coefficient %d (got %d but expecting %d). \
You might have forgotten to specify the value rank correctly in an Expression subclass",
                   i, r, fe_r);
    }

    for (std::size_t j = 0; j < r; ++j)
    {
      const std::size_t dim = coefficients[i]->value_dimension(j);
      const std::size_t fe_dim = fe->value_dimension(j);
      if (dim != fe_dim)
      {
        dolfin_error("AssemblerBase.cpp",
                     "assemble form",
                     "Invalid value dimension %d for coefficient %d (got %d but expecting %d). \
You might have forgotten to specify the value dimension correctly in an Expression subclass",
                     j, i, dim, fe_dim);
      }
    }
  }
}
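//-----------------------------------------------------------------------------
// Hypothetical usage note (not part of the library source): the errors
// raised above typically mean a user-defined Expression does not declare
// its value shape. A minimal sketch of a vector-valued Expression using
// the standard DOLFIN C++ API; the class name Source is made up here.
//
//   class Source : public Expression
//   {
//   public:
//     // Declare a rank-1 coefficient with value dimension 2, so that
//     // value_rank()/value_dimension() match the generated element
//     Source() : Expression(2) {}
//
//     void eval(Array<double>& values, const Array<double>& x) const
//     {
//       values[0] = x[0];
//       values[1] = x[1];
//     }
//   };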
//-----------------------------------------------------------------------------
void OpenMpAssembler::assemble(GenericTensor& A, const Form& a)
{
  // Get mesh
  const Mesh& mesh = a.mesh();

  if (MPI::size(mesh.mpi_comm()) > 1)
  {
    dolfin_error("OpenMpAssembler.cpp",
                 "perform multithreaded assembly using OpenMP assembler",
                 "The OpenMp assembler has not been tested in combination with MPI");
  }

  dolfin_assert(a.ufc_form());

  // All assembler functions above end up calling this function, which
  // in turn calls the assembler functions below to assemble over
  // cells, exterior and interior facets. Note the importance of
  // treating empty mesh functions as null pointers for the PyDOLFIN
  // interface.

  // Get cell domains
  std::shared_ptr<const MeshFunction<std::size_t>>
    cell_domains = a.cell_domains();

  // Get exterior facet domains
  std::shared_ptr<const MeshFunction<std::size_t>>
    exterior_facet_domains = a.exterior_facet_domains();

  // Get interior facet domains
  std::shared_ptr<const MeshFunction<std::size_t>>
    interior_facet_domains = a.interior_facet_domains();

  // Check form
  AssemblerBase::check(a);

  // Create data structure for local assembly data
  UFC ufc(a);

  // Initialize global tensor
  init_global_tensor(A, a);

  // FIXME: The below selections should be made robust
  if (a.ufc_form()->has_interior_facet_integrals())
  {
    assemble_interior_facets(A, a, ufc, interior_facet_domains,
                             cell_domains, 0);
  }

  if (a.ufc_form()->has_exterior_facet_integrals())
  {
    assemble_cells_and_exterior_facets(A, a, ufc, cell_domains,
                                       exterior_facet_domains, 0);
  }
  else
    assemble_cells(A, a, ufc, cell_domains, 0);

  // Finalize assembly of global tensor
  if (finalize_tensor)
    A.apply("add");
}
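//-----------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the library source): driving the
// OpenMP assembler above from user code. The Poisson form classes are
// assumed to come from FFC-generated code as in the standard DOLFIN demos,
// and the "num_threads" global parameter is assumed to be the switch that
// enables threaded assembly; only assemble(A, a) is taken from this file.
//
//   #include <dolfin.h>
//   #include "Poisson.h"               // assumed FFC-generated header
//
//   using namespace dolfin;
//
//   int main()
//   {
//     parameters["num_threads"] = 4;   // assumed global parameter
//
//     UnitSquareMesh mesh(32, 32);
//     Poisson::FunctionSpace V(mesh);
//     Poisson::BilinearForm a(V, V);
//
//     Matrix A;
//     OpenMpAssembler assembler;
//     assembler.assemble(A, a);        // calls the function defined above
//
//     return 0;
//   }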
//-----------------------------------------------------------------------------
void AssemblerBase::init_global_tensor(GenericTensor& A, const Form& a)
{
  dolfin_assert(a.ufc_form());

  // Get dof maps
  std::vector<const GenericDofMap*> dofmaps;
  for (std::size_t i = 0; i < a.rank(); ++i)
    dofmaps.push_back(a.function_space(i)->dofmap().get());

  if (A.empty())
  {
    Timer t0("Build sparsity");

    // Create layout for initialising tensor
    std::shared_ptr<TensorLayout> tensor_layout;
    tensor_layout = A.factory().create_layout(a.rank());
    dolfin_assert(tensor_layout);

    // Get dimensions and mapping across processes for each dimension
    std::vector<std::shared_ptr<const IndexMap>> index_maps;
    for (std::size_t i = 0; i < a.rank(); i++)
    {
      dolfin_assert(dofmaps[i]);
      index_maps.push_back(dofmaps[i]->index_map());
    }

    // Initialise tensor layout
    // FIXME: somewhere need to check block sizes are same on both axes
    // NOTE: Jan: that will be done on the backend side; IndexMap will
    //       provide tabulate functions with arbitrary block size;
    //       moreover the functions will tabulate directly using a
    //       correct int type
    tensor_layout->init(a.mesh().mpi_comm(), index_maps,
                        TensorLayout::Ghosts::UNGHOSTED);

    // Build sparsity pattern if required
    if (tensor_layout->sparsity_pattern())
    {
      GenericSparsityPattern& pattern = *tensor_layout->sparsity_pattern();
      SparsityPatternBuilder::build(pattern,
                                    a.mesh(), dofmaps,
                                    a.ufc_form()->has_cell_integrals(),
                                    a.ufc_form()->has_interior_facet_integrals(),
                                    a.ufc_form()->has_exterior_facet_integrals(),
                                    a.ufc_form()->has_vertex_integrals(),
                                    keep_diagonal);
    }
    t0.stop();

    // Initialize tensor
    Timer t1("Init tensor");
    A.init(*tensor_layout);
    t1.stop();

    // Insert zeros on the diagonal as diagonal entries may be
    // prematurely optimised away by the linear algebra backend when
    // calling GenericMatrix::apply, e.g. PETSc does this then errors
    // when matrices have no diagonal entry inserted.
    if (A.rank() == 2 && keep_diagonal)
    {
      // Down cast to GenericMatrix
      GenericMatrix& _matA = A.down_cast<GenericMatrix>();

      // Loop over rows and insert 0.0 on the diagonal
      const double block = 0.0;
      const std::pair<std::size_t, std::size_t> row_range = A.local_range(0);
      const std::size_t range = std::min(row_range.second, A.size(1));
      for (std::size_t i = row_range.first; i < range; i++)
      {
        dolfin::la_index _i = i;
        _matA.set(&block, 1, &_i, 1, &_i);
      }
      A.apply("flush");
    }

    // Delete sparsity pattern
    Timer t2("Delete sparsity");
    t2.stop();
  }
  else
  {
    // If tensor is not reset, check that dimensions are correct
    for (std::size_t i = 0; i < a.rank(); ++i)
    {
      if (A.size(i) != dofmaps[i]->global_dimension())
      {
        dolfin_error("AssemblerBase.cpp",
                     "assemble form",
                     "Dim %d of tensor does not match form", i);
      }
    }
  }

  if (!add_values)
    A.zero();
}
//-----------------------------------------------------------------------------
void AssemblerBase::init_global_tensor(GenericTensor& A, const Form& a)
{
  dolfin_assert(a.ufc_form());

  // Get dof maps
  std::vector<const GenericDofMap*> dofmaps;
  for (std::size_t i = 0; i < a.rank(); ++i)
    dofmaps.push_back(a.function_space(i)->dofmap().get());

  if (A.size(0) == 0)
  {
    Timer t0("Build sparsity");

    // Create layout for initialising tensor
    std::shared_ptr<TensorLayout> tensor_layout;
    tensor_layout = A.factory().create_layout(a.rank());
    dolfin_assert(tensor_layout);

    // Get dimensions
    std::vector<std::size_t> global_dimensions;
    std::vector<std::pair<std::size_t, std::size_t>> local_range;
    std::vector<std::size_t> block_sizes;
    for (std::size_t i = 0; i < a.rank(); i++)
    {
      dolfin_assert(dofmaps[i]);
      global_dimensions.push_back(dofmaps[i]->global_dimension());
      local_range.push_back(dofmaps[i]->ownership_range());
      block_sizes.push_back(dofmaps[i]->block_size);
    }

    // Set block size for sparsity graphs
    std::size_t block_size = 1;
    if (a.rank() == 2)
    {
      const std::vector<std::size_t> _bs(a.rank(), dofmaps[0]->block_size);
      block_size = (block_sizes == _bs) ? dofmaps[0]->block_size : 1;
    }

    // Initialise tensor layout
    tensor_layout->init(a.mesh().mpi_comm(), global_dimensions, block_size,
                        local_range);

    // Build sparsity pattern if required
    if (tensor_layout->sparsity_pattern())
    {
      GenericSparsityPattern& pattern = *tensor_layout->sparsity_pattern();
      SparsityPatternBuilder::build(pattern,
                                    a.mesh(), dofmaps,
                                    a.ufc_form()->has_cell_integrals(),
                                    a.ufc_form()->has_interior_facet_integrals(),
                                    a.ufc_form()->has_exterior_facet_integrals(),
                                    keep_diagonal);
    }
    t0.stop();

    // Initialize tensor
    Timer t1("Init tensor");
    A.init(*tensor_layout);
    t1.stop();

    // Insert zeros on the diagonal as diagonal entries may be prematurely
    // optimised away by the linear algebra backend when calling
    // GenericMatrix::apply, e.g. PETSc does this then errors when matrices
    // have no diagonal entry inserted.
    if (A.rank() == 2 && keep_diagonal)
    {
      // Down cast to GenericMatrix
      GenericMatrix& _A = A.down_cast<GenericMatrix>();

      // Loop over rows and insert 0.0 on the diagonal
      const double block = 0.0;
      const std::pair<std::size_t, std::size_t> row_range = A.local_range(0);
      const std::size_t range = std::min(row_range.second, A.size(1));
      for (std::size_t i = row_range.first; i < range; i++)
      {
        dolfin::la_index _i = i;
        _A.set(&block, 1, &_i, 1, &_i);
      }
      A.apply("flush");
    }

    // Delete sparsity pattern
    Timer t2("Delete sparsity");
    t2.stop();
  }
  else
  {
    // If tensor is not reset, check that dimensions are correct
    for (std::size_t i = 0; i < a.rank(); ++i)
    {
      if (A.size(i) != dofmaps[i]->global_dimension())
      {
        dolfin_error("AssemblerBase.cpp",
                     "assemble form",
                     "Reset of tensor in assembly not requested, "
                     "but dim %d of tensor does not match form", i);
      }
    }
  }

  if (!add_values)
    A.zero();
}
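//-----------------------------------------------------------------------------
// Hypothetical usage sketch (not part of the library source): add_values,
// finalize_tensor and keep_diagonal are used above as bare AssemblerBase
// data members; that they can be set as public flags on a concrete
// Assembler, and the Matrix type, are assumptions taken from the standard
// DOLFIN API rather than from this file.
//
//   Assembler assembler;
//   assembler.keep_diagonal = true;    // insert explicit zeros on the diagonal
//   assembler.add_values = false;      // zero A before assembling
//   assembler.finalize_tensor = true;  // apply("add") when assembly is done
//
//   Matrix A;
//   assembler.assemble(A, a);          // a is a rank-2 Form, as assumed above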