Example #1
int main(int argc, char** argv) {
  // A nonzero field parameter 'br' is enabled by any command-line argument.
  double br = 0.;
  if (argc > 1) br = 0.1;
  M5T const m(br);

  // Rotation by pi/2 around the (unnormalized) axis (0.5, 1, 1).
  Basic3DVector<float> axis(0.5, 1., 1.);

  Surface::RotationType rot(axis, 0.5 * M_PI);
  std::cout << rot << std::endl;

  Surface::PositionType pos(0., 0., 0.);

  // Build global trajectory parameters from local parameters on the plane.
  Plane plane(pos, rot);
  LocalTrajectoryParameters tpl(-1. / 3.5, 1., 1., 0., 0., 1.);
  GlobalVector mg = plane.toGlobal(tpl.momentum());
  GlobalTrajectoryParameters tpg(pos, mg, -1., &m);
  double curv = tpg.transverseCurvature();
  std::cout << curv << " " << mg.mag() << std::endl;
  std::cout << tpg.position() << " " << tpg.momentum() << std::endl;

  GlobalTrajectoryParameters tpg0(tpg);

  //HelixForwardPlaneCrossing::PositionType zero(0.,0.,0.);
  GlobalPoint zero(0., 0., 0.);
  std::cout << std::endl;
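Aside: the curvature printed above follows the standard relation between transverse momentum and bending radius in a solenoidal field. A minimal, self-contained sketch (the field value, units, and sign convention are assumptions, not CMSSW's exact code):

#include <cmath>
#include <cstdio>

int main() {
  double const q  = -1.0;   // charge in units of e
  double const B  = 3.8;    // solenoid field in tesla (assumed CMS-like value)
  double const pT = 1.0;    // transverse momentum in GeV

  // p_T [GeV] = 0.29979 * B [T] * R [m], so the curvature is kappa = 1/R,
  // with a sign convention that varies between frameworks.
  double const kappa = -q * 0.29979 * B / pT;  // in 1/m
  std::printf("curvature = %g 1/m, radius = %g m\n", kappa, 1.0 / kappa);
}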
Example #2
void Process::setInitialConditions(const int process_id, double const t,
                                   GlobalVector& x)
{
    // getDOFTable() can be overloaded by the specific process.
    auto const& dof_table_of_process = getDOFTable(process_id);

    auto const& per_process_variables = _process_variables[process_id];
    for (std::size_t variable_id = 0;
         variable_id < per_process_variables.size();
         variable_id++)
    {
        SpatialPosition pos;

        auto const& pv = per_process_variables[variable_id];
        DBUG("Set the initial condition of variable %s of process %d.",
             pv.get().getName().data(), process_id);

        auto const& ic = pv.get().getInitialCondition();

        auto const num_comp = pv.get().getNumberOfComponents();

        for (int component_id = 0; component_id < num_comp; ++component_id)
        {
            auto const& mesh_subset =
                dof_table_of_process.getMeshSubset(variable_id, component_id);
            auto const mesh_id = mesh_subset.getMeshID();
            for (auto const* node : mesh_subset.getNodes())
            {
                MeshLib::Location const l(mesh_id, MeshLib::MeshItemType::Node,
                                          node->getID());

                pos.setNodeID(node->getID());
                auto const& ic_value = ic(t, pos);

                auto global_index =
                    std::abs(dof_table_of_process.getGlobalIndex(l, variable_id,
                                                                 component_id));
#ifdef USE_PETSC
                // Ghost entries of the global matrix and vectors carry
                // negative global indices for the equation assembly. Since
                // global indices start at zero, a ghost entry at index zero
                // cannot be marked by negation and is encoded as the negative
                // of the vector size (or matrix dimension) instead. To assign
                // initial values to the ghost entries, those indices are
                // restored to zero here.
                if (global_index == x.size())
                    global_index = 0;
#endif
                x.set(global_index, ic_value[component_id]);
            }
        }
    }
}
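Aside: the #ifdef USE_PETSC branch above decodes ghost indices. A minimal sketch of that decoding, with a plain std::vector standing in for the distributed GlobalVector (the encoding follows the comment in the example; names are illustrative):

#include <cmath>
#include <cstdio>
#include <vector>

int main() {
    std::vector<double> x(8, 0.0);

    // Ghost entries carry negative global indices; a ghost that would sit at
    // index 0 is encoded as -(vector size), since -0 is indistinguishable
    // from 0. Positive indices are ordinary owned entries.
    long const encoded[] = {3, -5, -static_cast<long>(x.size())};

    for (long e : encoded) {
        long index = std::abs(e);
        if (index == static_cast<long>(x.size()))
            index = 0;  // restore the zero-index ghost entry
        x[index] = 1.0;
        std::printf("set x[%ld]\n", index);
    }
}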
Example #3
// TODO: this essentially duplicates code that is also present in ProcessOutput.
double getNodalValue(GlobalVector const& x, MeshLib::Mesh const& mesh,
                     NumLib::LocalToGlobalIndexMap const& dof_table,
                     std::size_t const node_id,
                     std::size_t const global_component_id)
{
    MeshLib::Location const l{mesh.getID(), MeshLib::MeshItemType::Node,
                              node_id};

    auto const index = dof_table.getLocalIndex(
        l, global_component_id, x.getRangeBegin(), x.getRangeEnd());

    return x.get(index);
}
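Aside: getLocalIndex() maps a global index into the locally owned range delimited by getRangeBegin()/getRangeEnd(). A minimal sketch of that mapping, assuming contiguous ownership ranges as with PETSc vectors:

#include <cassert>
#include <cstdio>

int main() {
    long const range_begin = 100;  // first owned global index
    long const range_end = 150;    // one past the last owned global index

    long const global_index = 123;
    assert(global_index >= range_begin && global_index < range_end);

    long const local_index = global_index - range_begin;
    std::printf("local index: %ld\n", local_index);
}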
Example #4
void Process::setInitialConditions(double const t, GlobalVector& x)
{
    DBUG("Set initial conditions.");

    SpatialPosition pos;

    for (int variable_id = 0;
         variable_id < static_cast<int>(_process_variables.size());
         ++variable_id)
    {
        ProcessVariable& pv = _process_variables[variable_id];
        auto const& ic = pv.getInitialCondition();

        auto const num_comp = pv.getNumberOfComponents();

        for (int component_id = 0; component_id < num_comp; ++component_id)
        {
            auto const& mesh_subsets =
                _local_to_global_index_map->getMeshSubsets(variable_id,
                                                           component_id);
            for (auto const& mesh_subset : mesh_subsets)
            {
                auto const mesh_id = mesh_subset->getMeshID();
                for (auto const* node : mesh_subset->getNodes())
                {
                    MeshLib::Location const l(
                        mesh_id, MeshLib::MeshItemType::Node, node->getID());

                    pos.setNodeID(node->getID());
                    auto const& ic_value = ic(t, pos);

                    auto global_index =
                        std::abs(_local_to_global_index_map->getGlobalIndex(
                            l, variable_id, component_id));
#ifdef USE_PETSC
                    // Ghost entries of the global matrix and vectors carry
                    // negative global indices for the equation assembly.
                    // Since global indices start at zero, a ghost entry at
                    // index zero cannot be marked by negation and is encoded
                    // as the negative of the vector size (or matrix
                    // dimension) instead. To assign initial values to the
                    // ghost entries, those indices are restored to zero here.
                    if (global_index == x.size())
                        global_index = 0;
#endif
                    x.set(global_index, ic_value[component_id]);
                }
            }
        }
    }
}
Example #5
template <int DisplacementDim>
void SmallDeformationNonlocalProcess<DisplacementDim>::
    assembleWithJacobianConcreteProcess(const double t, GlobalVector const& x,
                                        GlobalVector const& xdot,
                                        const double dxdot_dx,
                                        const double dx_dx, GlobalMatrix& M,
                                        GlobalMatrix& K, GlobalVector& b,
                                        GlobalMatrix& Jac)
{
    DBUG("AssembleWithJacobian SmallDeformationNonlocalProcess.");

    std::vector<std::reference_wrapper<NumLib::LocalToGlobalIndexMap>>
        dof_table = {std::ref(*_local_to_global_index_map)};

    const int process_id = 0;
    ProcessLib::ProcessVariable const& pv = getProcessVariables(process_id)[0];

    // Call global assembler for each local assembly item.
    GlobalExecutor::executeSelectedMemberDereferenced(
        _global_assembler, &VectorMatrixAssembler::assembleWithJacobian,
        _local_assemblers, pv.getActiveElementIDs(), dof_table, t, x, xdot,
        dxdot_dx, dx_dx, M, K, b, Jac, _coupled_solutions);

    b.copyValues(*_nodal_forces);
    std::transform(_nodal_forces->begin(), _nodal_forces->end(),
                   _nodal_forces->begin(), [](double val) { return -val; });
}
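Aside: the last two statements copy the assembled right-hand side into _nodal_forces and flip its sign. The same element-wise negation in isolation:

#include <algorithm>
#include <cstdio>
#include <vector>

int main() {
    std::vector<double> const b = {1.0, -2.0, 3.0};
    std::vector<double> nodal_forces = b;  // stands in for b.copyValues(...)

    std::transform(nodal_forces.begin(), nodal_forces.end(),
                   nodal_forces.begin(), [](double v) { return -v; });

    for (double v : nodal_forces)
        std::printf("%g\n", v);  // prints -1 2 -3
}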
Example #6
void VectorMatrixAssembler::preAssemble(
    const std::size_t mesh_item_id, LocalAssemblerInterface& local_assembler,
    const NumLib::LocalToGlobalIndexMap& dof_table, const double t,
    const GlobalVector& x)
{
    auto const indices = NumLib::getIndices(mesh_item_id, dof_table);
    auto const local_x = x.get(indices);

    local_assembler.preAssemble(t, local_x);
}
Example #7
void oldCode(const GlobalPoint & P1, const GlobalPoint & P2) {

  typedef TkRotation<double> Rotation;
  typedef Basic2DVector<double> Point2D;

  // Right-handed orthonormal frame: aX points toward P1 in the transverse
  // plane, aY is its in-plane perpendicular, and aZ is the global z axis.
  GlobalVector aX = GlobalVector(P1.x(), P1.y(), 0.).unit();
  GlobalVector aY(-aX.y(), aX.x(), 0.);
  GlobalVector aZ(0., 0., 1.);
  Rotation theRotation(aX, aY, aZ);

  PointUV p1(Point2D(P1.x(),P1.y()), &theRotation);
  PointUV p2(Point2D(P2.x(),P2.y()), &theRotation);

  std::cout << "\nold for " << P1 <<", " << P2 << std::endl;
  std::cout << theRotation << std::endl;
  std::cout << p1.u() << " " << p1.v() << std::endl;
  std::cout << p2.u() << " " << p2.v() << std::endl;
  std::cout << p1.unmap() << std::endl;
  std::cout << p2.unmap() << std::endl;

}
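Aside: the frame built in oldCode() is orthonormal by construction. A small check with plain structs standing in for the CMSSW vector types:

#include <cmath>
#include <cstdio>

struct Vec3 { double x, y, z; };

int main() {
  double const px = 3.0, py = 4.0;          // transverse position of P1
  double const norm = std::hypot(px, py);

  Vec3 const aX{px / norm, py / norm, 0.0};
  Vec3 const aY{-aX.y, aX.x, 0.0};          // 90-degree in-plane rotation
  Vec3 const aZ{0.0, 0.0, 1.0};

  // aX.aY and aX.aZ vanish by construction, so (aX, aY, aZ) is orthonormal.
  std::printf("aX.aY = %g, aX.aZ = %g\n",
              aX.x * aY.x + aX.y * aY.y + aX.z * aY.z,
              aX.x * aZ.x + aX.y * aZ.y + aX.z * aZ.z);
}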
Example #8
Eigen::Vector3d HTProcess::getFlux(std::size_t element_id,
                                   MathLib::Point3d const& p,
                                   double const t,
                                   GlobalVector const& x) const
{
    // Fetch the local nodal values of the primary variables for this element.
    std::vector<GlobalIndexType> indices_cache;
    auto const r_c_indices = NumLib::getRowColumnIndices(
        element_id, *_local_to_global_index_map, indices_cache);
    std::vector<double> local_x(x.get(r_c_indices.rows));

    return _local_assemblers[element_id]->getFlux(p, t, local_x);
}
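Aside: for an HT (hydro-thermal) process the returned flux is typically of Darcy type, q = -K grad(h), evaluated from the interpolated local solution. A hedged sketch of that evaluation (the isotropic K and the gradient values are assumptions, not the actual local assembler code):

#include <Eigen/Dense>
#include <iostream>

int main() {
    double const K = 1e-5;                         // conductivity, assumed
    Eigen::Vector3d const grad_h(0.1, 0.0, -0.2);  // interpolated gradient

    Eigen::Vector3d const q = -K * grad_h;         // Darcy-type flux
    std::cout << q.transpose() << std::endl;
}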
Example #9
void getVectorValues(
    GlobalVector const& x,
    NumLib::LocalToGlobalIndexMap::RowColumnIndices const& r_c_indices,
    std::vector<double>& local_x)
{
    auto const& indices = r_c_indices.rows;
    local_x.clear();
    local_x.reserve(indices.size());

    for (auto i : indices)
    {
        local_x.emplace_back(x.get(i));
    }
}
Example #10
void PhaseFieldProcess<DisplacementDim>::assembleWithJacobianConcreteProcess(
    const double t, GlobalVector const& x, GlobalVector const& xdot,
    const double dxdot_dx, const double dx_dx, GlobalMatrix& M, GlobalMatrix& K,
    GlobalVector& b, GlobalMatrix& Jac)
{
    std::vector<std::reference_wrapper<NumLib::LocalToGlobalIndexMap>>
        dof_tables;

    // For the monolithic scheme
    if (_use_monolithic_scheme)
    {
        DBUG("AssembleJacobian PhaseFieldProcess for the monolithic scheme.");
        dof_tables.emplace_back(*_local_to_global_index_map);
    }
    else
    {
        // For the staggered scheme
        if (_coupled_solutions->process_id == 1)
        {
            DBUG(
                "Assemble the Jacobian equations of phase field in "
                "PhaseFieldProcess for the staggered scheme.");
        }
        else
        {
            DBUG(
                "Assemble the Jacobian equations of deformation in "
                "PhaseFieldProcess for the staggered scheme.");
        }
        dof_tables.emplace_back(*_local_to_global_index_map);
        dof_tables.emplace_back(*_local_to_global_index_map_single_component);
    }
    // Call global assembler for each local assembly item.

    GlobalExecutor::executeMemberDereferenced(
        _global_assembler, &VectorMatrixAssembler::assembleWithJacobian,
        _local_assemblers, dof_tables, t, x, xdot, dxdot_dx, dx_dx, M, K, b,
        Jac, _coupled_solutions);

    if (!_use_monolithic_scheme && (_coupled_solutions->process_id == 0))
    {
        b.copyValues(*_nodal_forces);
        std::transform(_nodal_forces->begin(), _nodal_forces->end(),
                       _nodal_forces->begin(), [](double val) { return -val; });
    }
}
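Aside: the dof_tables vector above holds non-owning references: one index map for the monolithic scheme, two for the staggered one. The reference_wrapper pattern in isolation (IndexMap is a stand-in type):

#include <cstdio>
#include <functional>
#include <vector>

struct IndexMap { int id; };

int main() {
    IndexMap full{0}, single_component{1};
    bool const use_monolithic_scheme = false;

    std::vector<std::reference_wrapper<IndexMap>> dof_tables;
    dof_tables.emplace_back(full);
    if (!use_monolithic_scheme)
        dof_tables.emplace_back(single_component);

    for (auto const& t : dof_tables)
        std::printf("table %d\n", t.get().id);
}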
Example #11
NumLib::IterationResult TESProcess::postIterationConcreteProcess(
    GlobalVector const& x)
{
    bool check_passed = true;

    if (!Trafo::constrained)
    {
        // bounds checking only has to happen if the vapour mass fraction is
        // non-logarithmic.

        std::vector<GlobalIndexType> indices_cache;
        std::vector<double> local_x_cache;
        std::vector<double> local_x_prev_ts_cache;

        auto check_variable_bounds = [&](std::size_t id,
                                         TESLocalAssemblerInterface& loc_asm) {
            auto const r_c_indices = NumLib::getRowColumnIndices(
                id, *this->_local_to_global_index_map, indices_cache);
            local_x_cache = x.get(r_c_indices.rows);
            local_x_prev_ts_cache = _x_previous_timestep->get(r_c_indices.rows);

            if (!loc_asm.checkBounds(local_x_cache, local_x_prev_ts_cache))
                check_passed = false;
        };

        GlobalExecutor::executeDereferenced(check_variable_bounds,
                                            _local_assemblers);
    }

    if (!check_passed)
        return NumLib::IterationResult::REPEAT_ITERATION;

    // TODO remove
    DBUG("ts %lu iteration %lu (in current ts: %lu) try %u accepted",
         _assembly_params.timestep, _assembly_params.total_iteration,
         _assembly_params.iteration_in_current_timestep,
         _assembly_params.number_of_try_of_iteration);

    _assembly_params.number_of_try_of_iteration = 0;

    return NumLib::IterationResult::SUCCESS;
}
Example #12
void VectorMatrixAssembler::assemble(
    const std::size_t mesh_item_id, LocalAssemblerInterface& local_assembler,
    std::vector<std::reference_wrapper<NumLib::LocalToGlobalIndexMap>> const&
        dof_tables,
    const double t, const GlobalVector& x, GlobalMatrix& M, GlobalMatrix& K,
    GlobalVector& b, CoupledSolutionsForStaggeredScheme const* const cpl_xs)
{
    std::vector<std::vector<GlobalIndexType>> indices_of_processes;
    indices_of_processes.reserve(dof_tables.size());
    for (std::size_t i = 0; i < dof_tables.size(); i++)
    {
        indices_of_processes.emplace_back(
            NumLib::getIndices(mesh_item_id, dof_tables[i].get()));
    }

    auto const& indices = (cpl_xs == nullptr)
                              ? indices_of_processes[0]
                              : indices_of_processes[cpl_xs->process_id];
    _local_M_data.clear();
    _local_K_data.clear();
    _local_b_data.clear();

    if (cpl_xs == nullptr)
    {
        auto const local_x = x.get(indices);
        local_assembler.assemble(t, local_x, _local_M_data, _local_K_data,
                                 _local_b_data);
    }
    else
    {
        auto local_coupled_xs0 =
            getPreviousLocalSolutions(*cpl_xs, indices_of_processes);
        auto local_coupled_xs =
            getCurrentLocalSolutions(*cpl_xs, indices_of_processes);

        ProcessLib::LocalCoupledSolutions local_coupled_solutions(
            cpl_xs->dt, cpl_xs->process_id, std::move(local_coupled_xs0),
            std::move(local_coupled_xs));

        local_assembler.assembleForStaggeredScheme(t, _local_M_data,
                                                   _local_K_data, _local_b_data,
                                                   local_coupled_solutions);
    }

    auto const num_r_c = indices.size();
    auto const r_c_indices =
        NumLib::LocalToGlobalIndexMap::RowColumnIndices(indices, indices);

    if (!_local_M_data.empty())
    {
        auto const local_M = MathLib::toMatrix(_local_M_data, num_r_c, num_r_c);
        M.add(r_c_indices, local_M);
    }
    if (!_local_K_data.empty())
    {
        auto const local_K = MathLib::toMatrix(_local_K_data, num_r_c, num_r_c);
        K.add(r_c_indices, local_K);
    }
    if (!_local_b_data.empty())
    {
        assert(_local_b_data.size() == num_r_c);
        b.add(indices, _local_b_data);
    }
}
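Aside: M.add(r_c_indices, local_M) scatters a dense local matrix into the global one through the index map. A minimal sketch with Eigen triplets standing in for the GlobalMatrix interface (an assumption; the real backend may be PETSc or Eigen):

#include <Eigen/Sparse>
#include <iostream>
#include <vector>

int main() {
    std::vector<int> const indices = {2, 5, 7};  // global dof indices
    Eigen::Matrix3d local;
    local << 4, -1, 0,
            -1, 4, -1,
             0, -1, 4;

    std::vector<Eigen::Triplet<double>> triplets;
    for (int r = 0; r < 3; ++r)
        for (int c = 0; c < 3; ++c)
            triplets.emplace_back(indices[r], indices[c], local(r, c));

    Eigen::SparseMatrix<double> K(8, 8);
    K.setFromTriplets(triplets.begin(), triplets.end());  // sums duplicates
    std::cout << K << std::endl;
}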
Example #13
void VectorMatrixAssembler::assembleWithJacobian(
    std::size_t const mesh_item_id, LocalAssemblerInterface& local_assembler,
    std::vector<std::reference_wrapper<NumLib::LocalToGlobalIndexMap>> const&
        dof_tables,
    const double t, GlobalVector const& x, GlobalVector const& xdot,
    const double dxdot_dx, const double dx_dx, GlobalMatrix& M, GlobalMatrix& K,
    GlobalVector& b, GlobalMatrix& Jac,
    CoupledSolutionsForStaggeredScheme const* const cpl_xs)
{
    std::vector<std::vector<GlobalIndexType>> indices_of_processes;
    indices_of_processes.reserve(dof_tables.size());
    for (std::size_t i = 0; i < dof_tables.size(); i++)
    {
        indices_of_processes.emplace_back(
            NumLib::getIndices(mesh_item_id, dof_tables[i].get()));
    }

    auto const& indices = (cpl_xs == nullptr)
                              ? indices_of_processes[0]
                              : indices_of_processes[cpl_xs->process_id];
    auto const local_xdot = xdot.get(indices);

    _local_M_data.clear();
    _local_K_data.clear();
    _local_b_data.clear();
    _local_Jac_data.clear();

    if (cpl_xs == nullptr)
    {
        auto const local_x = x.get(indices);
        _jacobian_assembler->assembleWithJacobian(
            local_assembler, t, local_x, local_xdot, dxdot_dx, dx_dx,
            _local_M_data, _local_K_data, _local_b_data, _local_Jac_data);
    }
    else
    {
        auto local_coupled_xs0 =
            getPreviousLocalSolutions(*cpl_xs, indices_of_processes);
        auto local_coupled_xs =
            getCurrentLocalSolutions(*cpl_xs, indices_of_processes);

        ProcessLib::LocalCoupledSolutions local_coupled_solutions(
            cpl_xs->dt, cpl_xs->process_id, std::move(local_coupled_xs0),
            std::move(local_coupled_xs));

        _jacobian_assembler->assembleWithJacobianForStaggeredScheme(
            local_assembler, t, local_xdot, dxdot_dx, dx_dx, _local_M_data,
            _local_K_data, _local_b_data, _local_Jac_data,
            local_coupled_solutions);
    }

    auto const num_r_c = indices.size();
    auto const r_c_indices =
        NumLib::LocalToGlobalIndexMap::RowColumnIndices(indices, indices);

    if (!_local_M_data.empty())
    {
        auto const local_M = MathLib::toMatrix(_local_M_data, num_r_c, num_r_c);
        M.add(r_c_indices, local_M);
    }
    if (!_local_K_data.empty())
    {
        auto const local_K = MathLib::toMatrix(_local_K_data, num_r_c, num_r_c);
        K.add(r_c_indices, local_K);
    }
    if (!_local_b_data.empty())
    {
        assert(_local_b_data.size() == num_r_c);
        b.add(indices, _local_b_data);
    }
    if (!_local_Jac_data.empty())
    {
        auto const local_Jac =
            MathLib::toMatrix(_local_Jac_data, num_r_c, num_r_c);
        Jac.add(r_c_indices, local_Jac);
    }
    else
    {
        OGS_FATAL(
            "No Jacobian has been assembled! This might be due to programming "
            "errors in the local assembler of the current process.");
    }
}
Example #14
void LocalLinearLeastSquaresExtrapolator::extrapolateElement(
    std::size_t const element_index,
    const unsigned num_components,
    ExtrapolatableElementCollection const& extrapolatables,
    const double t,
    GlobalVector const& current_solution,
    LocalToGlobalIndexMap const& dof_table,
    GlobalVector& counts)
{
    auto const& integration_point_values =
        extrapolatables.getIntegrationPointValues(
            element_index, t, current_solution, dof_table,
            _integration_point_values_cache);

    auto const& N_0 = extrapolatables.getShapeMatrix(element_index, 0);
    auto const num_nodes = static_cast<unsigned>(N_0.cols());
    auto const num_values =
        static_cast<unsigned>(integration_point_values.size());

    if (num_values % num_components != 0)
        OGS_FATAL(
            "The number of computed integration point values is not divisible "
            "by the number of components. Maybe the computed property is "
            "not a %d-component vector for each integration point.",
            num_components);

    // number of integration points in the element
    const auto num_int_pts = num_values / num_components;

    if (num_int_pts < num_nodes)
        OGS_FATAL(
            "Least squares is not possible if there are more nodes than "
            "integration points.");

    auto const pair_it_inserted = _qr_decomposition_cache.emplace(
        std::make_pair(num_nodes, num_int_pts), CachedData{});

    auto& cached_data = pair_it_inserted.first->second;
    if (pair_it_inserted.second)
    {
        DBUG("Computing new singular value decomposition");

        // interpolation_matrix * nodal_values = integration_point_values
        // We are going to pseudo-invert this relation now using singular value
        // decomposition.
        auto& interpolation_matrix = cached_data.A;
        interpolation_matrix.resize(num_int_pts, num_nodes);

        interpolation_matrix.row(0) = N_0;
        for (unsigned int_pt = 1; int_pt < num_int_pts; ++int_pt)
        {
            auto const& shp_mat =
                extrapolatables.getShapeMatrix(element_index, int_pt);
            assert(shp_mat.cols() == num_nodes);

            // copy shape matrix to extrapolation matrix row-wise
            interpolation_matrix.row(int_pt) = shp_mat;
        }

        // JacobiSVD is extremely reliable, but fast only for small matrices.
        // But we usually have small matrices and we don't compute very often.
        // Cf.
        // http://eigen.tuxfamily.org/dox/group__TopicLinearAlgebraDecompositions.html
        //
        // Decomposes interpolation_matrix = U S V^T.
        Eigen::JacobiSVD<Eigen::MatrixXd> svd(
            interpolation_matrix, Eigen::ComputeThinU | Eigen::ComputeThinV);

        auto const& S = svd.singularValues();
        auto const& U = svd.matrixU();
        auto const& V = svd.matrixV();

        // Compute and save the pseudo inverse V * S^{-1} * U^T.
        auto const rank = svd.rank();
        assert(rank == num_nodes);

        // cf. http://eigen.tuxfamily.org/dox/JacobiSVD_8h_source.html
        cached_data.A_pinv.noalias() = V.leftCols(rank) *
                                       S.head(rank).asDiagonal().inverse() *
                                       U.leftCols(rank).transpose();
    }
    else if (cached_data.A.row(0) != N_0)
    {
        OGS_FATAL("The cached and the passed shapematrices differ.");
    }

    auto const& global_indices =
        _dof_table_single_component(element_index, 0).rows;

    if (num_components == 1)
    {
        auto const integration_point_values_vec =
            MathLib::toVector(integration_point_values);

        // Apply the pre-computed pseudo-inverse.
        Eigen::VectorXd const nodal_values =
            cached_data.A_pinv * integration_point_values_vec;

        // TODO does that give rise to PETSc problems? E.g., writing to ghost
        // nodes? Furthermore: Is ghost nodes communication necessary for PETSc?
        _nodal_values->add(global_indices, nodal_values);
        counts.add(global_indices,
                   std::vector<double>(global_indices.size(), 1.0));
    }
    else
    {
        auto const integration_point_values_mat = MathLib::toMatrix(
            integration_point_values, num_components, num_int_pts);

        // Apply the pre-computed pseudo-inverse.
        Eigen::MatrixXd const nodal_values =
            cached_data.A_pinv * integration_point_values_mat.transpose();

        std::vector<GlobalIndexType> indices;
        indices.reserve(num_components * global_indices.size());

        // _nodal_values is ordered location-wise
        for (unsigned comp = 0; comp < num_components; ++comp)
        {
            for (auto i : global_indices)
            {
                indices.push_back(num_components * i + comp);
            }
        }

        // The nodal values are passed as a raw pointer because the
        // PETScVector and EigenVector implementations differ slightly.
        _nodal_values->add(indices, nodal_values.data());
        counts.add(indices, std::vector<double>(indices.size(), 1.0));
    }
}
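Aside: the cached A_pinv above is the Moore-Penrose pseudo-inverse A+ = V S^{-1} U^T, restricted to the numerical rank. The same computation in isolation (the 4x2 test matrix is arbitrary):

#include <Eigen/SVD>
#include <iostream>

int main() {
    Eigen::MatrixXd A(4, 2);  // e.g. 4 integration points, 2 nodes
    A << 1, 0,
         0, 1,
         1, 1,
         1, -1;

    Eigen::JacobiSVD<Eigen::MatrixXd> svd(
        A, Eigen::ComputeThinU | Eigen::ComputeThinV);
    auto const rank = svd.rank();

    Eigen::MatrixXd const A_pinv =
        svd.matrixV().leftCols(rank) *
        svd.singularValues().head(rank).asDiagonal().inverse() *
        svd.matrixU().leftCols(rank).transpose();

    // For a full-column-rank A, A_pinv * A is the 2x2 identity.
    std::cout << A_pinv * A << std::endl;
}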