// Writes every element of subMatrix into fillMatrix, with subMatrix's
// top-left corner landing at (startRow, startCol) of fillMatrix.
//
// NOTE(review): both loop bounds use subMatrix.size(). If MatrixBase here is
// Eigen's, size() is the TOTAL coefficient count (rows*cols) and these bounds
// are wrong (should be rows()/cols()); if it is a project square-matrix class
// whose size() returns the dimension, this is correct. Confirm against the
// MatrixBase declaration before changing.
void fillSubMatrix(MatrixBase<T>& fillMatrix, MatrixBase<T>& subMatrix,
                   unsigned int startRow, unsigned int startCol) {
  const unsigned int extent = subMatrix.size();
  for (unsigned int row = 0; row < extent; ++row) {
    for (unsigned int col = 0; col < extent; ++col) {
      fillMatrix(startRow + row, startCol + col) = subMatrix(row, col);
    }
  }
}
// Fills below_threshold with one flag per coefficient of `values`:
// true iff the coefficient is strictly less than `threshold`.
// The output vector is cleared first, so it holds exactly values.size() flags.
inline void getThresholdInclusion(MatrixBase<Derived> const& values,
                                  const double threshold,
                                  vector<bool>& below_threshold) {
  const size_t n = values.size();
  below_threshold.clear();
  // Reserve up front: one allocation instead of repeated geometric growth
  // while pushing n flags (clang-tidy performance-inefficient-vector-operation).
  below_threshold.reserve(n);
  for (size_t x = 0; x < n; x++) {
    below_threshold.push_back(values[x] < threshold);
  }
}
void doKinematicsTemp(const RigidBodyTree &model, KinematicsCache<typename DerivedQ::Scalar> &cache, const MatrixBase<DerivedQ> &q, const MatrixBase<DerivedV> &v, bool compute_JdotV) { // temporary solution. Explicit doKinematics calls will not be necessary in the near future. if (v.size() == 0 && model.num_velocities != 0) cache.initialize(q); else cache.initialize(q, v); model.doKinematics(cache, compute_JdotV); }
// Temporary shim: repackages the column-per-body external-wrench matrix
// `f_ext_value` (6 rows, one column per body) into the map form expected by
// RigidBodyTree::dynamicsBiasTerm, and returns the resulting bias term.
// An empty f_ext_value means "no external wrenches".
Matrix<Scalar, Dynamic, 1> dynamicsBiasTermTemp(
    const RigidBodyTree& model, KinematicsCache<Scalar>& cache,
    const MatrixBase<DerivedF>& f_ext_value) {
  eigen_aligned_unordered_map<const RigidBody*, Matrix<Scalar, 6, 1> > f_ext;
  if (f_ext_value.size() > 0) {
    // cols() is a signed Eigen index while bodies.size() is unsigned; cast so
    // the comparison is well-typed (silences -Wsign-compare).
    assert(static_cast<size_t>(f_ext_value.cols()) == model.bodies.size());
    for (DenseIndex i = 0; i < f_ext_value.cols(); i++) {
      f_ext.insert({model.bodies[i].get(), f_ext_value.col(i)});
    }
  }
  return model.dynamicsBiasTerm(cache, f_ext);
}
// (Dropped the stray ';' that followed the original function body.)
// Temporary shim: converts the per-body wrench columns of `f_ext_value` into
// the BodyToWrenchMap form consumed by RigidBodyTree::dynamicsBiasTerm and
// returns the resulting bias term. An empty f_ext_value means no external
// wrenches are applied.
Matrix<Scalar, Dynamic, 1> dynamicsBiasTermTemp(
    const RigidBodyTreed& model, KinematicsCache<Scalar>& cache,
    const MatrixBase<DerivedF>& f_ext_value) {
  typename RigidBodyTree<Scalar>::BodyToWrenchMap external_wrenches;
  if (f_ext_value.size() > 0) {
    DRAKE_ASSERT(f_ext_value.cols() == model.bodies.size());
    const Eigen::Index num_cols = f_ext_value.cols();
    for (Eigen::Index body_index = 0; body_index < num_cols; ++body_index) {
      external_wrenches.insert(
          {model.bodies[body_index].get(), f_ext_value.col(body_index)});
    }
  }
  return model.dynamicsBiasTerm(cache, external_wrenches);
}