std::pair<
shared_ptr<typename HypersingularIntegralOperator<
BasisFunctionType, KernelType, ResultType>::LocalAssembler>,
shared_ptr<typename HypersingularIntegralOperator<
BasisFunctionType, KernelType, ResultType>::LocalAssembler>
>
HypersingularIntegralOperator<BasisFunctionType, KernelType, ResultType>::makeAssemblers(
        const QuadratureStrategy& quadStrategy,
        const AssemblyOptions& options) const
{
    typedef Fiber::RawGridGeometry<CoordinateType> RawGridGeometry;
    typedef std::vector<const Fiber::Shapeset<BasisFunctionType>*> ShapesetPtrVector;

    const bool verbose = (options.verbosityLevel() >= VerbosityLevel::DEFAULT);

    shared_ptr<RawGridGeometry> testRawGeometry, trialRawGeometry;
    shared_ptr<GeometryFactory> testGeometryFactory, trialGeometryFactory;
    shared_ptr<Fiber::OpenClHandler> openClHandler;
    shared_ptr<ShapesetPtrVector> testShapesets, trialShapesets;
    bool cacheSingularIntegrals;

    if (verbose)
        std::cout << "Collecting data for assembler construction..." << std::endl;
    this->collectDataForAssemblerConstruction(options,
                                              testRawGeometry, trialRawGeometry,
                                              testGeometryFactory, trialGeometryFactory,
                                              testShapesets, trialShapesets,
                                              openClHandler, cacheSingularIntegrals);
    if (verbose)
        std::cout << "Data collection finished." << std::endl;

    bool makeSeparateOffDiagonalAssembler =
        options.assemblyMode() == AssemblyOptions::ACA &&
        options.acaOptions().mode == AcaOptions::HYBRID_ASSEMBLY;
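    // In hybrid ACA assembly the off-diagonal (far-field) blocks are
    // presumably handled by a second, separate assembler returned as the
    // second member of the pair; this reading is based on the option and
    // parameter names only.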

    return reallyMakeAssemblers(quadStrategy,
                                testGeometryFactory, trialGeometryFactory,
                                testRawGeometry, trialRawGeometry,
                                testShapesets, trialShapesets, openClHandler,
                                options.parallelizationOptions(),
                                options.verbosityLevel(),
                                cacheSingularIntegrals,
                                makeSeparateOffDiagonalAssembler);
}
// Example 2
std::unique_ptr<DiscreteBoundaryOperator<ResultType>>
ElementaryLocalOperator<BasisFunctionType, ResultType>::
    assembleWeakFormInSparseMode(LocalAssembler &assembler,
                                 const AssemblyOptions &options) const {
#ifdef WITH_TRILINOS
  if (boost::is_complex<BasisFunctionType>::value)
    throw std::runtime_error(
        "ElementaryLocalOperator::assembleWeakFormInSparseMode(): "
        "sparse-mode assembly of identity operators for "
        "complex-valued basis functions is not supported yet");

  const Space<BasisFunctionType> &testSpace = *this->dualToRange();
  const Space<BasisFunctionType> &trialSpace = *this->domain();

  // Fill local submatrices
  const GridView &view = testSpace.gridView();
  const size_t elementCount = view.entityCount(0);
  std::vector<int> elementIndices(elementCount);
  for (size_t i = 0; i < elementCount; ++i)
    elementIndices[i] = i;
  std::vector<arma::Mat<ResultType>> localResult;
  assembler.evaluateLocalWeakForms(elementIndices, localResult);
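  // localResult[e] now holds the local weak-form matrix of element e, with
  // one row per local test DOF and one column per local trial DOF; this is
  // the indexing assumed by the weighting loop below.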

  // Global DOF indices corresponding to local DOFs on elements
  std::vector<std::vector<GlobalDofIndex>> testGdofs(elementCount);
  std::vector<std::vector<GlobalDofIndex>> trialGdofs(elementCount);
  std::vector<std::vector<BasisFunctionType>> testLdofWeights(elementCount);
  std::vector<std::vector<BasisFunctionType>> trialLdofWeights(elementCount);
  gatherGlobalDofs(testSpace, trialSpace, testGdofs, trialGdofs,
                   testLdofWeights, trialLdofWeights);

  // Multiply matrix entries by DOF weights
  for (size_t e = 0; e < elementCount; ++e)
    for (size_t trialDof = 0; trialDof < trialGdofs[e].size(); ++trialDof)
      for (size_t testDof = 0; testDof < testGdofs[e].size(); ++testDof)
        localResult[e](testDof, trialDof) *=
            conj(testLdofWeights[e][testDof]) * trialLdofWeights[e][trialDof];
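  // The test-side weights are conjugated, following the usual convention that
  // the weak form is conjugate-linear in its test argument; for the real
  // basis-function types admitted above the conjugation is a no-op.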

  // Estimate number of entries in each row

  //    This will be useful when we begin to use MPI
  //    // Get global DOF indices for which this process is responsible
  //    const int testGlobalDofCount = testSpace.globalDofCount();
  //    Epetra_Map rowMap(testGlobalDofCount, 0 /* index-base */, comm);
  //    std::vector<int> myTestGlobalDofs(rowMap.MyGlobalElements(),
  //                                      rowMap.MyGlobalElements() +
  //                                      rowMap.NumMyElements());
  //    const int myTestGlobalDofCount = myTestGlobalDofs.size();

  const int testGlobalDofCount = testSpace.globalDofCount();
  const int trialGlobalDofCount = trialSpace.globalDofCount();
  arma::Col<int> nonzeroEntryCountEstimates(testGlobalDofCount);
  nonzeroEntryCountEstimates.fill(0);

  // Upper estimate for the number of global trial DOFs coupled to a given
  // global test DOF: sum of the local trial DOF counts for each element that
  // contributes to the global test DOF in question
  for (size_t e = 0; e < elementCount; ++e)
    for (size_t testLdof = 0; testLdof < testGdofs[e].size(); ++testLdof) {
      int testGdof = testGdofs[e][testLdof];
      if (testGdof >= 0)
        nonzeroEntryCountEstimates(testGdof) += trialGdofs[e].size();
    }
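  // (A negative global DOF index is taken to mean that the local DOF is not
  // attached to any global DOF, so it contributes no matrix entries.)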

  Epetra_SerialComm comm; // To be replaced once we begin to use MPI
  Epetra_LocalMap rowMap(testGlobalDofCount, 0 /* index_base */, comm);
  Epetra_LocalMap colMap(trialGlobalDofCount, 0 /* index_base */, comm);
  shared_ptr<Epetra_FECrsMatrix> result =
      boost::make_shared<Epetra_FECrsMatrix>(
          Copy, rowMap, colMap, nonzeroEntryCountEstimates.memptr());
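  // 'Copy' makes the matrix own its graph and values; the per-row counts are
  // only preallocation hints, so the overestimates computed above are safe.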

  // TODO: make each process responsible for a subset of elements
  // Find the maximum number of local matrix entries (test DOF count times
  // trial DOF count) on any single element
  size_t maxLdofCount = 0;
  for (size_t e = 0; e < elementCount; ++e)
    maxLdofCount =
        std::max(maxLdofCount, testGdofs[e].size() * trialGdofs[e].size());

  // Initialise sparse matrix with zeros at required positions
  arma::Col<double> zeros(maxLdofCount);
  zeros.fill(0.);
  for (size_t e = 0; e < elementCount; ++e)
    result->InsertGlobalValues(testGdofs[e].size(), &testGdofs[e][0],
                               trialGdofs[e].size(), &trialGdofs[e][0],
                               zeros.memptr());
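  // Two-pass assembly: the InsertGlobalValues() calls above create the
  // sparsity pattern, so the SumIntoGlobalValues() calls below accumulate
  // element contributions into entries that already exist.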
  // Add contributions from individual elements
  for (size_t e = 0; e < elementCount; ++e)
    epetraSumIntoGlobalValues(*result, testGdofs[e], trialGdofs[e],
                              localResult[e]);
  result->GlobalAssemble();
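  // GlobalAssemble() finalises the matrix; with a serial communicator there
  // are no off-process contributions to exchange, so it essentially just
  // calls FillComplete().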

  // If the assembly mode is ACA and AHMED is available, construct the block
  // cluster tree; otherwise leave it uninitialized.
  typedef ClusterConstructionHelper<BasisFunctionType> CCH;
  typedef AhmedDofWrapper<CoordinateType> AhmedDofType;
  typedef ExtendedBemCluster<AhmedDofType> AhmedBemCluster;
  typedef bbxbemblcluster<AhmedDofType, AhmedDofType> AhmedBemBlcluster;

  shared_ptr<AhmedBemBlcluster> blockCluster;
  shared_ptr<IndexPermutation> test_o2pPermutation, test_p2oPermutation;
  shared_ptr<IndexPermutation> trial_o2pPermutation, trial_p2oPermutation;
#ifdef WITH_AHMED
  if (options.assemblyMode() == AssemblyOptions::ACA) {
    const AcaOptions &acaOptions = options.acaOptions();
    bool indexWithGlobalDofs = acaOptions.mode != AcaOptions::HYBRID_ASSEMBLY;

    shared_ptr<AhmedBemCluster> testClusterTree;
    CCH::constructBemCluster(testSpace, indexWithGlobalDofs, acaOptions,
                             testClusterTree, test_o2pPermutation,
                             test_p2oPermutation);
    // TODO: construct a hermitian H-matrix if possible
    shared_ptr<AhmedBemCluster> trialClusterTree;
    CCH::constructBemCluster(trialSpace, indexWithGlobalDofs, acaOptions,
                             trialClusterTree, trial_o2pPermutation,
                             trial_p2oPermutation);
    unsigned int blockCount = 0;
    bool useStrongAdmissibilityCondition = !indexWithGlobalDofs;
    blockCluster.reset(CCH::constructBemBlockCluster(
        acaOptions, false /* hermitian */, *testClusterTree, *trialClusterTree,
        useStrongAdmissibilityCondition, blockCount).release());
  }
#endif
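  // Without AHMED, or outside ACA mode, blockCluster and the permutations
  // stay null and the sparse operator below is created without cluster
  // information; the cluster data appears to matter only for hybrid ACA
  // assembly.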

  // Create and return a discrete operator represented by the matrix that
  // has just been calculated
  return std::unique_ptr<DiscreteBoundaryOperator<ResultType>>(
      new DiscreteSparseBoundaryOperator<ResultType>(
          result, this->symmetry(), NO_TRANSPOSE, blockCluster,
          trial_o2pPermutation, test_o2pPermutation));
#else // WITH_TRILINOS
  throw std::runtime_error(
      "ElementaryLocalOperator::assembleWeakFormInSparseMode(): "
      "To enable assembly in sparse mode, recompile BEM++ "
      "with the symbol WITH_TRILINOS defined.");
#endif
}