// Normalizes x and optionally computes the first and second derivatives of
// x/|x| with respect to x (dx_norm and ddx_norm may be null).
template <typename Derived>
void normalizeVec(
    const Eigen::MatrixBase<Derived>& x,
    typename Derived::PlainObject& x_norm,
    typename Gradient<Derived, Derived::RowsAtCompileTime, 1>::type* dx_norm,
    typename Gradient<Derived, Derived::RowsAtCompileTime, 2>::type* ddx_norm) {
  typename Derived::Scalar xdotx = x.squaredNorm();
  typename Derived::Scalar norm_x = std::sqrt(xdotx);
  x_norm = x / norm_x;

  if (dx_norm) {
    // d(x/|x|)/dx = (I - x x^T / (x^T x)) / |x|
    dx_norm->setIdentity(x.rows(), x.rows());
    (*dx_norm) -= x * x.transpose() / xdotx;
    (*dx_norm) /= norm_x;

    if (ddx_norm) {
      auto dx_norm_transpose = transposeGrad(*dx_norm, x.rows());
      auto ddx_norm_times_norm =
          -matGradMultMat(x_norm, x_norm.transpose(), (*dx_norm), dx_norm_transpose);
      auto dnorm_inv = -x.transpose() / (xdotx * norm_x);
      (*ddx_norm) = ddx_norm_times_norm / norm_x;
      auto temp = (*dx_norm) * norm_x;
      int n = x.rows();
      for (int col = 0; col < n; col++) {
        auto column_as_matrix = (dnorm_inv(0, col) * temp);
        for (int row_block = 0; row_block < n; row_block++) {
          ddx_norm->block(row_block * n, col, n, 1) +=
              column_as_matrix.col(row_block);
        }
      }
    }
  }
}
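// Standalone sketch (plain Eigen only, without the Gradient/transposeGrad/
// matGradMultMat helpers used above): the first-derivative term computed
// above, d(x/|x|)/dx = (I - x x^T / (x^T x)) / |x|, checked against a central
// finite difference. All names below are illustrative, not part of the
// snippet above.
#include <Eigen/Core>
#include <cmath>
#include <iostream>

int main() {
  Eigen::Vector3d x(1.0, -2.0, 0.5);
  const double xdotx = x.squaredNorm();
  const double norm_x = std::sqrt(xdotx);
  const Eigen::Matrix3d dx_norm =
      (Eigen::Matrix3d::Identity() - x * x.transpose() / xdotx) / norm_x;

  const double h = 1e-6;
  Eigen::Matrix3d fd;
  for (int i = 0; i < 3; ++i) {
    Eigen::Vector3d xp = x, xm = x;
    xp[i] += h;
    xm[i] -= h;
    fd.col(i) = (xp.normalized() - xm.normalized()) / (2.0 * h);
  }
  std::cout << (dx_norm - fd).norm() << std::endl;  // expect ~1e-10
  return 0;
}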
template <typename EKFState_T>
template <typename H_type, typename Res_type, typename R_type>
void MSF_MeasurementBase<EKFState_T>::calculateAndApplyCorrection(
    shared_ptr<EKFState_T> state, MSF_Core<EKFState_T>& core,
    const Eigen::MatrixBase<H_type>& H_delayed,
    const Eigen::MatrixBase<Res_type>& res_delayed,
    const Eigen::MatrixBase<R_type>& R_delayed) {
  EIGEN_STATIC_ASSERT_FIXED_SIZE(H_type);
  EIGEN_STATIC_ASSERT_FIXED_SIZE(R_type);

  // Get measurements.
  /// Correction from EKF update.
  Eigen::Matrix<double, MSF_Core<EKFState_T>::nErrorStatesAtCompileTime, 1>
      correction_;

  R_type S;
  Eigen::Matrix<double, MSF_Core<EKFState_T>::nErrorStatesAtCompileTime,
                R_type::RowsAtCompileTime> K;
  typename MSF_Core<EKFState_T>::ErrorStateCov& P = state->P;

  S = H_delayed * P * H_delayed.transpose() + R_delayed;
  K = P * H_delayed.transpose() * S.inverse();

  correction_ = K * res_delayed;
  const typename MSF_Core<EKFState_T>::ErrorStateCov KH =
      (MSF_Core<EKFState_T>::ErrorStateCov::Identity() - K * H_delayed);
  P = KH * P * KH.transpose() + K * R_delayed * K.transpose();

  // Make sure P stays symmetric.
  P = 0.5 * (P + P.transpose());

  core.applyCorrection(state, correction_);
}
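// Minimal standalone sketch of the same update with concrete small sizes in
// place of the MSF_Core compile-time dimensions: innovation covariance S,
// Kalman gain K, state correction, Joseph-form covariance update, and the
// final re-symmetrization. Values are illustrative only.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d P;                      // error-state covariance
  P << 0.5, 0.1,
       0.1, 0.3;
  Eigen::Matrix<double, 1, 2> H;          // measurement Jacobian
  H << 1.0, 0.0;
  Eigen::Matrix<double, 1, 1> R;          // measurement noise
  R << 0.01;
  Eigen::Matrix<double, 1, 1> res;        // measurement residual
  res << 0.2;

  const Eigen::Matrix<double, 1, 1> S = H * P * H.transpose() + R;
  const Eigen::Matrix<double, 2, 1> K = P * H.transpose() * S.inverse();
  const Eigen::Vector2d correction = K * res;

  const Eigen::Matrix2d KH = Eigen::Matrix2d::Identity() - K * H;
  P = KH * P * KH.transpose() + K * R * K.transpose();
  P = 0.5 * (P + P.transpose());          // keep P symmetric

  std::cout << "correction: " << correction.transpose() << "\n"
            << "updated P:\n" << P << std::endl;
  return 0;
}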
template <class D>
SOPHUS_FUNC bool isOrthogonal(Eigen::MatrixBase<D> const& R) {
  using Scalar = typename D::Scalar;
  static int const N = D::RowsAtCompileTime;
  static int const M = D::ColsAtCompileTime;

  static_assert(N == M, "must be a square matrix");
  static_assert(N >= 2, "must have compile time dimension >= 2");

  return (R * R.transpose() - Matrix<Scalar, N, N>::Identity()).norm() <
         Constants<Scalar>::epsilon();
}
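// Hypothetical usage sketch, assuming the Sophus header that provides
// isOrthogonal (and Constants<Scalar>) is available.
#include <Eigen/Geometry>
#include <sophus/rotation_matrix.hpp>

int main() {
  const Eigen::Matrix3d R =
      Eigen::AngleAxisd(0.3, Eigen::Vector3d::UnitZ()).toRotationMatrix();
  const bool ok = Sophus::isOrthogonal(R);         // expected: true
  const bool bad = Sophus::isOrthogonal(2.0 * R);  // scaled: expected false
  return (ok && !bad) ? 0 : 1;
}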
template <typename Matrix3, typename Vector3>
inline void unSkew(const Eigen::MatrixBase<Matrix3>& M,
                   const Eigen::MatrixBase<Vector3>& v) {
  EIGEN_STATIC_ASSERT_VECTOR_SPECIFIC_SIZE(Vector3, 3);
  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Matrix3, 3, 3);
  assert((M + M.transpose()).isMuchSmallerThan(M));

  Vector3& v_ = const_cast<Eigen::MatrixBase<Vector3>&>(v).derived();
  typedef typename Vector3::RealScalar Scalar;

  v_[0] = Scalar(0.5) * (M(2,1) - M(1,2));
  v_[1] = Scalar(0.5) * (M(0,2) - M(2,0));
  v_[2] = Scalar(0.5) * (M(1,0) - M(0,1));
}
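// Standalone round-trip sketch: build the skew-symmetric matrix of w by hand,
// then recover w with unSkew (assumes the function above is in scope).
#include <Eigen/Core>

int main() {
  const Eigen::Vector3d w(0.1, -0.2, 0.3);
  Eigen::Matrix3d S;
  S <<    0.0, -w.z(),  w.y(),
        w.z(),    0.0, -w.x(),
       -w.y(),  w.x(),    0.0;
  Eigen::Vector3d w_back;
  unSkew(S, w_back);  // w_back should reproduce w
  return ((w_back - w).norm() < 1e-12) ? 0 : 1;
}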
template <typename data1__, typename data2__, typename data3__,
          typename data4__, typename data5__>
void least_squares_eqcon(Eigen::MatrixBase<data1__>& x,
                         const Eigen::MatrixBase<data2__>& A,
                         const Eigen::MatrixBase<data3__>& b,
                         const Eigen::MatrixBase<data4__>& B,
                         const Eigen::MatrixBase<data5__>& d) {
  typedef Eigen::Matrix<typename Eigen::MatrixBase<data4__>::Scalar,
                        Eigen::Dynamic, Eigen::Dynamic> B_matrixD;
  B_matrixD Q, R, temp_B, temp_R;
  typedef Eigen::Matrix<typename Eigen::MatrixBase<data2__>::Scalar,
                        Eigen::Dynamic, Eigen::Dynamic> A_matrixD;
  A_matrixD A1, A2, temp_A;
  typedef Eigen::Matrix<typename Eigen::MatrixBase<data5__>::Scalar,
                        Eigen::Dynamic, Eigen::Dynamic> d_matrixD;
  d_matrixD y;
  typedef Eigen::Matrix<typename Eigen::MatrixBase<data3__>::Scalar,
                        Eigen::Dynamic, Eigen::Dynamic> b_matrixD;
  b_matrixD z, rhs;
  typedef Eigen::Matrix<typename Eigen::MatrixBase<data1__>::Scalar,
                        Eigen::Dynamic, Eigen::Dynamic> x_matrixD;
  x_matrixD x_temp(A.cols(), b.cols());
  typename A_matrixD::Index p(B.rows()), n(A.cols());
#ifdef DEBUG
  typename A_matrixD::Index m(b.rows());
#endif

  // build Q and R
  Eigen::HouseholderQR<B_matrixD> qr(B.transpose());
  Q = qr.householderQ();
  temp_B = qr.matrixQR();
  temp_R = Eigen::TriangularView<B_matrixD, Eigen::Upper>(temp_B);
  R = temp_R.topRows(p);
  assert((R.rows() == p) && (R.cols() == p));
  assert((Q.rows() == n) && (Q.cols() == n));

  // build A1 and A2
  temp_A = A * Q;
  A1 = temp_A.leftCols(p);
  A2 = temp_A.rightCols(n - p);
#ifdef DEBUG
  assert((A1.rows() == m) && (A1.cols() == p));
  assert((A2.rows() == m) && (A2.cols() == n - p));
#endif
  assert(A1.cols() == p);
  assert(A2.cols() == n - p);

  // solve for y
  y = R.transpose().lu().solve(d);

  // setup the unconstrained optimization
  rhs = b - A1 * y;
  least_squares_uncon(z, A2, rhs);

  // build the solution
  x_temp.topRows(p) = y;
  x_temp.bottomRows(n - p) = z;
  x = Q * x_temp;
}
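// Standalone sketch of the problem the routine above solves: minimize
// ||A x - b|| subject to B x = d. Here it is solved a different way, via the
// dense KKT system [A^T A, B^T; B, 0] [x; lambda] = [A^T b; d] with plain
// Eigen, which can serve as an independent check on a small instance.
// Values are illustrative only.
#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::MatrixXd A(4, 3);
  A << 1, 0, 1,
       0, 1, 1,
       1, 1, 0,
       1, 2, 3;
  Eigen::VectorXd b(4);
  b << 1, 2, 3, 4;
  Eigen::MatrixXd B(1, 3);
  B << 1, 1, 1;
  Eigen::VectorXd d(1);
  d << 1;

  Eigen::MatrixXd KKT(4, 4);
  KKT << A.transpose() * A, B.transpose(),
         B, Eigen::MatrixXd::Zero(1, 1);
  Eigen::VectorXd rhs(4);
  rhs << A.transpose() * b, d;

  const Eigen::VectorXd sol = KKT.lu().solve(rhs);
  std::cout << "x = " << sol.head(3).transpose() << std::endl;
  return 0;
}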
template <class D>
SOPHUS_FUNC bool isScaledOrthogonalAndPositive(Eigen::MatrixBase<D> const& sR) {
  using Scalar = typename D::Scalar;
  static int const N = D::RowsAtCompileTime;
  static int const M = D::ColsAtCompileTime;
  using Eigen::numext::pow;
  using Eigen::numext::sqrt;

  Scalar det = sR.determinant();
  if (det <= Scalar(0)) {
    return false;
  }

  Scalar scale_sqr = pow(det, Scalar(2. / N));

  static_assert(N == M, "must be a square matrix");
  static_assert(N >= 2, "must have compile time dimension >= 2");

  return (sR * sR.transpose() - scale_sqr * Matrix<Scalar, N, N>::Identity())
             .template lpNorm<Eigen::Infinity>() <
         sqrt(Constants<Scalar>::epsilon());
}
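// Hypothetical usage sketch, assuming the Sophus header that provides
// isScaledOrthogonalAndPositive is available: a positively scaled rotation
// passes, while flipping its sign (negative determinant) does not.
#include <Eigen/Geometry>
#include <sophus/rotation_matrix.hpp>

int main() {
  const Eigen::Matrix3d R =
      Eigen::AngleAxisd(0.7, Eigen::Vector3d::UnitX()).toRotationMatrix();
  const bool ok = Sophus::isScaledOrthogonalAndPositive(2.5 * R);   // true
  const bool bad = Sophus::isScaledOrthogonalAndPositive(-2.5 * R); // det < 0
  return (ok && !bad) ? 0 : 1;
}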
// One step of first-order re-orthogonalization: R <- R - 0.5 * R * (R^T R - I).
template <typename Derived>
inline void approximateNearestOrthogonalMatrix(
    const Eigen::MatrixBase<Derived>& R) {
  Eigen::Matrix3d E = R.transpose() * R;
  E.diagonal().array() -= 1;
  const_cast<Eigen::MatrixBase<Derived>&>(R) -= 0.5 * R * E;
}
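// Standalone sketch: perturb a rotation matrix slightly to simulate numerical
// drift, then apply the first-order re-orthogonalization step above (assumes
// the function is visible in the current scope).
#include <Eigen/Geometry>
#include <iostream>

int main() {
  Eigen::Matrix3d R =
      Eigen::AngleAxisd(0.5, Eigen::Vector3d::UnitY()).toRotationMatrix();
  R += 1e-4 * Eigen::Matrix3d::Random();  // drifted, no longer exactly orthogonal
  approximateNearestOrthogonalMatrix(R);
  // The orthogonality error drops from O(1e-4) to roughly its square.
  std::cout << (R * R.transpose() - Eigen::Matrix3d::Identity()).norm()
            << std::endl;
  return 0;
}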
// Returns Eigen's lazy transpose view of mat (no copy is made).
template <typename Derived>
static inline typename Eigen::MatrixBase<Derived>::ConstTransposeReturnType
trans(const Eigen::MatrixBase<Derived>& mat) {
  return mat.transpose();
}