/* static */ bool ocraWbiConversions::wbiToOcraSegJacobian(const Eigen::MatrixXd &jac, Eigen::MatrixXd &J)
{
    // Both matrices must be (DIM_T + DIM_R) rows tall and share a column count.
    const int dof = DIM_T + DIM_R;
    if (dof != jac.rows() || dof != J.rows() || jac.cols() != J.cols()) {
        std::cout << "ERROR: Input and output matrices dimensions should be the same" << std::endl;
        return false;
    }

    // FOR FULL n+6 Jacobian ONLY.
    // Reorder the floating-base 3x3 blocks and the two 3x(n-6) joint strips.
    // The mapping below is exactly the swap the original performed via
    // temporaries (presumably converting between the wbi and ocra base
    // twist orderings -- confirm against the library conventions).
    // Direct assignment is safe: jac is const and distinct from J.
    const long jointCols = jac.cols() - 6;

    J.topLeftCorner(3, 3)             = jac.block<3, 3>(3, 3);
    J.block<3, 3>(0, 3)               = jac.bottomLeftCorner(3, 3);
    J.bottomLeftCorner(3, 3)          = jac.block<3, 3>(0, 3);
    J.block<3, 3>(3, 3)               = jac.topLeftCorner(3, 3);
    J.topRightCorner(3, jointCols)    = jac.bottomRightCorner(3, jointCols);
    J.bottomRightCorner(3, jointCols) = jac.topRightCorner(3, jointCols);

    return true;
}
/// Computes a condition-number style manipulability measure for a chain group:
/// penalty * (sigma_min / sigma_max) of the group Jacobian (or of its top
/// 3 rows when @p translation is true).
///
/// @param state              Robot state to evaluate the Jacobian at.
/// @param joint_model_group  Must be a chain group; otherwise returns false.
/// @param[out] manipulability  Resulting measure, scaled by the joint-limits penalty.
/// @param translation        If true, consider only the translational rows of the Jacobian.
/// @return false if the group is not a chain (Jacobian unavailable), true otherwise.
bool KinematicsMetrics::getManipulability(const robot_state::RobotState &state,
                                          const robot_model::JointModelGroup *joint_model_group,
                                          double &manipulability,
                                          bool translation) const
{
  // state.getJacobian() only works for chain groups.
  if (!joint_model_group->isChain())
  {
    return false;
  }
  // Get joint limits penalty
  double penalty = getJointLimitsPenalty(state, joint_model_group);
  Eigen::MatrixXd jacobian = state.getJacobian(joint_model_group);
  if (translation)
  {
    // Keep only the top 3 rows (linear-velocity part). Copy through a named
    // temporary to avoid aliasing when resizing jacobian from its own block.
    Eigen::MatrixXd linear = jacobian.topLeftCorner(3, jacobian.cols());
    jacobian = linear;
  }
  // Previously both branches duplicated the SVD/logging/ratio code verbatim;
  // a single path keeps them from drifting apart.
  Eigen::JacobiSVD<Eigen::MatrixXd> svdsolver(jacobian);
  Eigen::MatrixXd singular_values = svdsolver.singularValues();
  for (int i = 0; i < singular_values.rows(); ++i)
    logDebug("moveit.kin_metrics: Singular value: %d %f", i, singular_values(i, 0));
  manipulability = penalty * singular_values.minCoeff() / singular_values.maxCoeff();
  return true;
}
/// Computes the Yoshikawa manipulability index for a chain group:
/// penalty * sqrt(det(J * J^T)), where J is the group Jacobian (or its top
/// 3 rows when @p translation is true).
///
/// @param state              Robot state to evaluate the Jacobian at.
/// @param joint_model_group  Must be a chain group; otherwise returns false.
/// @param[out] manipulability_index  Resulting index, scaled by the joint-limits penalty.
/// @param translation        If true, consider only the translational rows of the Jacobian.
/// @return false if the group is not a chain (Jacobian unavailable), true otherwise.
bool KinematicsMetrics::getManipulabilityIndex(const robot_state::RobotState &state,
                                               const robot_model::JointModelGroup *joint_model_group,
                                               double &manipulability_index,
                                               bool translation) const
{
  // state.getJacobian() only works for chain groups.
  if (!joint_model_group->isChain())
  {
    return false;
  }
  Eigen::MatrixXd jacobian = state.getJacobian(joint_model_group);
  // Get joint limits penalty
  double penalty = getJointLimitsPenalty(state, joint_model_group);
  if (translation)
  {
    // Keep only the top 3 rows (linear-velocity part). Copy through a named
    // temporary to avoid aliasing when resizing jacobian from its own block.
    Eigen::MatrixXd jacobian_2 = jacobian.topLeftCorner(3, jacobian.cols());
    jacobian = jacobian_2;
  }
  Eigen::MatrixXd matrix = jacobian * jacobian.transpose();
  // det(J * J^T) is mathematically non-negative, but floating-point round-off
  // can make it come out as a tiny negative number near singularities, which
  // would turn sqrt() into NaN. Clamp at zero before taking the root.
  double det = matrix.determinant();
  if (det < 0.0)
    det = 0.0;
  manipulability_index = penalty * sqrt(det);
  return true;
}
int main(int argc, char* argv[]) { shared_ptr<lcm::LCM> lcm(new lcm::LCM); if (!lcm->good()) return 1; auto quad = std::make_shared<Quadrotor>(); const int num_states = getNumStates(*quad); const int num_positions = num_states / 2; const int num_inputs = getNumInputs(*quad); Eigen::MatrixXd Q = Eigen::MatrixXd::Identity(num_states, num_states); Q.topLeftCorner(num_positions, num_positions) = 10.0 * Eigen::MatrixXd::Identity(num_positions, num_positions); Eigen::MatrixXd R = 0.1 * Eigen::MatrixXd::Identity(num_inputs, num_inputs); QuadrotorState<double> xG; xG.z = 1; QuadrotorInput<double> uG; uG.w1 = quad->m * quad->g * 0.25; uG.w2 = uG.w1; uG.w3 = uG.w1; uG.w4 = uG.w1; auto c = MakeTimeInvariantLqrSystem(*quad, xG, uG, Q, R); auto v = std::make_shared<BotVisualizer<QuadrotorState> >( lcm, GetDrakePath() + "/examples/Quadrotor/quadrotor.urdf", drake::multibody::joints::kRollPitchYaw); auto sys = cascade(feedback(quad, c), v); SimulationOptions options; options.realtime_factor = 1.0; options.initial_step_size = 0.005; if (commandLineOptionExists(argv, argv + argc, "--non-realtime")) { options.warn_real_time_violation = true; } for (int i = 0; i < 5; i++) { Eigen::Matrix<double, 12, 1> x0 = toEigen(xG); x0 += toEigen(getRandomVector<QuadrotorState>()); simulate(*sys, 0, 10, x0, options); } // todo: change this back to runLCM instead of just simulate }
/* static */ bool ocraWbiConversions::wbiToOcraCoMJacobian(const Eigen::MatrixXd &jac, Eigen::Matrix<double,3,Eigen::Dynamic> &J)
{
    // The CoM Jacobian has DIM_T (translational) rows; column counts must agree.
    if (DIM_T != jac.rows() || jac.cols() != J.cols()) {
        std::cout << "ERROR: Input and output matrices dimensions should be the same" << std::endl;
        return false;
    }

    const long jointCols = jac.cols() - 6;

    // Swap the two 3x3 floating-base blocks and copy the 3x(n-6) joint strip
    // through unchanged -- the same mapping the original expressed through
    // temporaries. Direct assignment is safe: jac is const and distinct from J.
    J.topLeftCorner(3, 3)          = jac.block<3, 3>(0, 3);
    J.block<3, 3>(0, 3)            = jac.topLeftCorner(3, 3);
    J.topRightCorner(3, jointCols) = jac.topRightCorner(3, jointCols);

    return true;
}
// Fits an ellipsoid to the collected magnetometer samples and derives a
// center offset (mMagCompCenter) plus a 3x3 compensation matrix (mMagComp)
// that maps raw samples onto a sphere. Updates the calibration plots with
// the compensated data.
void MagCal::calcMagComp()
{
    /*
     * Inspired by
     * http://davidegironi.blogspot.it/2013/01/magnetometer-calibration-helper-01-for.html#.UriTqkMjulM
     *
     * Ellipsoid fit from:
     * http://www.mathworks.com/matlabcentral/fileexchange/24693-ellipsoid-fit
     *
     * To use Eigen to convert matlab code, have a look at Eigen/AsciiQuickReference.txt
     */

    // The least-squares fit below solves for 9 quadric parameters, so at
    // least 9 samples are required.
    if (mMagSamples.size() < 9) {
        QMessageBox::warning(this,
                             "Magnetometer compensation",
                             "Too few points.");
        return;
    }

    int samples = mMagSamples.size();
    Eigen::VectorXd ex(samples);
    Eigen::VectorXd ey(samples);
    Eigen::VectorXd ez(samples);

    // Split the samples into per-axis vectors.
    for (int i = 0;i < samples;i++) {
        ex(i) = mMagSamples.at(i).at(0);
        ey(i) = mMagSamples.at(i).at(1);
        ez(i) = mMagSamples.at(i).at(2);
    }

    // Design matrix of the general quadric:
    // a*x^2 + b*y^2 + c*z^2 + 2d*xy + 2e*xz + 2f*yz + 2g*x + 2h*y + 2i*z = 1
    Eigen::MatrixXd eD(samples, 9);

    for (int i = 0;i < samples;i++) {
        eD(i, 0) = ex(i) * ex(i);
        eD(i, 1) = ey(i) * ey(i);
        eD(i, 2) = ez(i) * ez(i);
        eD(i, 3) = 2.0 * ex(i) * ey(i);
        eD(i, 4) = 2.0 * ex(i) * ez(i);
        eD(i, 5) = 2.0 * ey(i) * ez(i);
        eD(i, 6) = 2.0 * ex(i);
        eD(i, 7) = 2.0 * ey(i);
        eD(i, 8) = 2.0 * ez(i);
    }

    // Solve the normal equations (D^T D) v = D^T * ones for the coefficients.
    Eigen::MatrixXd etmp1 = eD.transpose() * eD;
    Eigen::MatrixXd etmp2 = eD.transpose() * Eigen::MatrixXd::Ones(samples, 1);
    Eigen::VectorXd eV = etmp1.lu().solve(etmp2);

    // Symmetric 4x4 homogeneous form of the fitted quadric.
    Eigen::MatrixXd eA(4, 4);
    eA(0,0)=eV(0); eA(0,1)=eV(3); eA(0,2)=eV(4); eA(0,3)=eV(6);
    eA(1,0)=eV(3); eA(1,1)=eV(1); eA(1,2)=eV(5); eA(1,3)=eV(7);
    eA(2,0)=eV(4); eA(2,1)=eV(5); eA(2,2)=eV(2); eA(2,3)=eV(8);
    eA(3,0)=eV(6); eA(3,1)=eV(7); eA(3,2)=eV(8); eA(3,3)=-1.0;

    // Ellipsoid center (the constant offset to subtract from raw samples).
    Eigen::MatrixXd eCenter = -eA.topLeftCorner(3, 3).lu().solve(eV.segment(6, 3));

    // Translate the quadric to its center.
    Eigen::MatrixXd eT = Eigen::MatrixXd::Identity(4, 4);
    eT(3, 0) = eCenter(0);
    eT(3, 1) = eCenter(1);
    eT(3, 2) = eCenter(2);

    Eigen::MatrixXd eR = eT * eA * eT.transpose();

    // Eigen-decompose the centered quadric to obtain the principal axes and,
    // from the eigenvalues, the radii (radius = 1/sqrt(eigenvalue)).
    Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> eEv(eR.topLeftCorner(3, 3) * (-1.0 / eR(3, 3)));
    Eigen::MatrixXd eVecs = eEv.eigenvectors();
    Eigen::MatrixXd eVals = eEv.eigenvalues();

    Eigen::MatrixXd eRadii(3, 1);
    eRadii(0) = sqrt(1.0 / eVals(0));
    eRadii(1) = sqrt(1.0 / eVals(1));
    eRadii(2) = sqrt(1.0 / eVals(2));

    // Compensation matrix: scale along each principal axis so the ellipsoid
    // becomes a sphere of the smallest radius, expressed back in sensor axes.
    Eigen::MatrixXd eScale = eRadii.asDiagonal().inverse() * eRadii.minCoeff();
    Eigen::MatrixXd eComp = eVecs * eScale * eVecs.transpose();

    // Store the 3x3 compensation matrix in row-major order.
    mMagComp.resize(9);
    mMagComp[0] = eComp(0, 0);
    mMagComp[1] = eComp(0, 1);
    mMagComp[2] = eComp(0, 2);
    mMagComp[3] = eComp(1, 0);
    mMagComp[4] = eComp(1, 1);
    mMagComp[5] = eComp(1, 2);
    mMagComp[6] = eComp(2, 0);
    mMagComp[7] = eComp(2, 1);
    mMagComp[8] = eComp(2, 2);

    mMagCompCenter.resize(3);
    mMagCompCenter[0] = eCenter(0, 0);
    mMagCompCenter[1] = eCenter(1, 0);
    mMagCompCenter[2] = eCenter(2, 0);

    // Apply the center offset and compensation matrix to every sample and
    // refresh the calibration plots with the corrected data.
    QVector<double> magX, magY, magZ;
    for (int i = 0;i < mMagSamples.size();i++) {
        double mx = mMagSamples.at(i).at(0);
        double my = mMagSamples.at(i).at(1);
        double mz = mMagSamples.at(i).at(2);

        mx -= mMagCompCenter.at(0);
        my -= mMagCompCenter.at(1);
        mz -= mMagCompCenter.at(2);

        magX.append(mx * mMagComp.at(0) + my * mMagComp.at(1) + mz * mMagComp.at(2));
        magY.append(mx * mMagComp.at(3) + my * mMagComp.at(4) + mz * mMagComp.at(5));
        magZ.append(mx * mMagComp.at(6) + my * mMagComp.at(7) + mz * mMagComp.at(8));
    }

    ui->magSampXyPlot->graph(1)->setData(magX, magY);
    ui->magSampXzPlot->graph(1)->setData(magX, magZ);
    ui->magSampYzPlot->graph(1)->setData(magY, magZ);

    updateMagPlots();
}
// Cross-product (gradient outer-product) approximation of the information
// matrix for the BA81 IRT model: accumulates, per response pattern, the
// per-pattern gradient into fc->grad and its outer product into fc->infoB.
// Two code paths: numSpecific == 0 (single latent group) and the Cai (2010)
// specific-factor variant. Work is parallelized over quadrature points and
// over unique response patterns, with per-thread gradient/meat buffers that
// are reduced into thread 0's slice at the end.
// NOTE(review): comments below describe the visible data flow; the statistical
// derivations live in the calcDerivCoef/mapLatentDeriv helpers defined elsewhere.
static void gradCov(omxFitFunction *oo, FitContext *fc)
{
	const double Scale = Global->llScale;
	omxExpectation *expectation = oo->expectation;
	BA81FitState *state = (BA81FitState*) oo->argStruct;
	BA81Expect *estate = (BA81Expect*) expectation->argStruct;
	if (estate->verbose >= 1) mxLog("%s: cross product approximation", oo->name());

	estate->grp.ba81OutcomeProb(estate->itemParam->data, FALSE);

	const int numThreads = Global->numThreads;
	const int numUnique = estate->getNumUnique();
	ba81NormalQuad &quad = estate->getQuad();
	const int numSpecific = quad.numSpecific;
	const int maxDims = quad.maxDims;
	// With specific factors, the last dimension is the specific one; the
	// primary (general) dimensions are the remaining maxDims-1.
	const int pDims = numSpecific? maxDims-1 : maxDims;
	const int maxAbilities = quad.maxAbilities;

	// Inverse of the primary-dimension latent covariance. InvertSymmetricPosDef
	// fills only the 'U' triangle; the transpose-copy below mirrors it into
	// the lower triangle so icovMat.data() is a full symmetric matrix.
	Eigen::MatrixXd icovMat(pDims, pDims);
	if (maxAbilities) {
		Eigen::VectorXd mean;
		Eigen::MatrixXd srcMat;
		estate->getLatentDistribution(fc, mean, srcMat);
		icovMat = srcMat.topLeftCorner(pDims, pDims);
		Matrix tmp(icovMat.data(), pDims, pDims);
		int info = InvertSymmetricPosDef(tmp, 'U');
		if (info) {
			omxRaiseErrorf("%s: latent covariance matrix is not positive definite", oo->name());
			return;
		}
		icovMat.triangularView<Eigen::Lower>() = icovMat.transpose().triangularView<Eigen::Lower>();
	}

	std::vector<int> &rowMap = estate->grp.rowMap;
	double *rowWeight = estate->grp.rowWeight;
	std::vector<bool> &rowSkip = estate->grp.rowSkip;
	const int totalQuadPoints = quad.totalQuadPoints;
	omxMatrix *itemParam = estate->itemParam;
	omxBuffer<double> patternLik(numUnique);

	// Derivative bookkeeping sizes: pDims first-order terms plus the packed
	// lower triangle of second-order terms (triangleLoc1), per quad point.
	const int priDerivCoef = pDims + triangleLoc1(pDims);
	const int numLatents = maxAbilities + triangleLoc1(maxAbilities);
	const int thrDerivSize = itemParam->cols * state->itemDerivPadSize;
	const int totalOutcomes = estate->totalOutcomes();
	// Item derivatives are only accumulated when item parameters are free.
	const int numItems = state->freeItemParams? estate->numItems() : 0;
	const size_t numParam = fc->varGroup->vars.size();
	// Per-thread gradient and cross-product ("meat") accumulators.
	std::vector<double> thrGrad(numThreads * numParam);
	std::vector<double> thrMeat(numThreads * numParam * numParam);
	const double *wherePrep = quad.wherePrep.data();

	if (numSpecific == 0) {
		omxBuffer<double> thrLxk(totalQuadPoints * numThreads);
		omxBuffer<double> derivCoef(totalQuadPoints * priDerivCoef);

		// Precompute latent-derivative coefficients per quadrature point
		// (shared across all patterns, so computed once up front).
		if (state->freeLatents) {
#pragma omp parallel for num_threads(numThreads)
			for (int qx=0; qx < totalQuadPoints; qx++) {
				const double *where = wherePrep + qx * maxDims;
				calcDerivCoef(fc, state, estate, icovMat.data(), where,
					      derivCoef.data() + qx * priDerivCoef);
			}
		}

		// One pass per unique response pattern.
#pragma omp parallel for num_threads(numThreads)
		for (int px=0; px < numUnique; px++) {
			if (rowSkip[px]) continue;
			int thrId = omx_absolute_thread_num();
			double *lxk = thrLxk.data() + thrId * totalQuadPoints;
			omxBuffer<double> expected(totalOutcomes); // can use maxOutcomes instead TODO
			std::vector<double> deriv0(thrDerivSize);
			std::vector<double> latentGrad(numLatents);
			std::vector<double> patGrad(numParam);
			double *grad = thrGrad.data() + thrId * numParam;
			double *meat = thrMeat.data() + thrId * numParam * numParam;
			estate->grp.ba81LikelihoodSlow2(px, lxk);

			// Pattern likelihood = sum of per-quad-point likelihoods.
			// If patternLik is already valid, maybe could avoid this loop TODO
			double patternLik1 = 0;
			for (int qx=0; qx < totalQuadPoints; qx++) {
				patternLik1 += lxk[qx];
			}
			patternLik[px] = patternLik1;

			// if (!validPatternLik(state, patternLik1)) complain, TODO

			// Accumulate latent and item derivative pieces over quad points.
			for (int qx=0; qx < totalQuadPoints; qx++) {
				double tmp = lxk[qx];
				mapLatentDeriv(state, estate, tmp, derivCoef.data() + qx * priDerivCoef,
					       latentGrad.data());

				for (int ix=0; ix < numItems; ++ix) {
					int pick = estate->grp.dataColumns[ix][rowMap[px]];
					if (pick == NA_INTEGER) continue; // missing response
					// "expected" is zero except at the observed outcome,
					// weighted by this quad point's likelihood.
					OMXZERO(expected.data(), estate->itemOutcomes(ix));
					expected[pick-1] = tmp;
					const double *spec = estate->itemSpec(ix);
					double *iparam = omxMatrixColumn(itemParam, ix);
					const int id = spec[RPF_ISpecID];
					double *myDeriv = deriv0.data() + ix * state->itemDerivPadSize;
					(*Glibrpf_model[id].dLL1)(spec, iparam, wherePrep + qx * maxDims,
								  expected.data(), myDeriv);
				}
			}

			gradCov_finish_1pat(1 / patternLik1, rowWeight[px], numItems, numLatents, numParam,
					    state, estate, itemParam, deriv0, latentGrad, Scale,
					    patGrad, grad, meat);
		}
	} else {
		// Specific-factor (two-tier) path, following Cai (2010) E-step quantities.
		const int totalPrimaryPoints = quad.totalPrimaryPoints;
		const int specificPoints = quad.quadGridSize;
		omxBuffer<double> thrLxk(totalQuadPoints * numSpecific * numThreads);
		omxBuffer<double> thrEi(totalPrimaryPoints * numThreads);
		omxBuffer<double> thrEis(totalPrimaryPoints * numSpecific * numThreads);
		// Per quad point: primary coefficients plus 2 per specific group.
		const int derivPerPoint = priDerivCoef + 2 * numSpecific;
		omxBuffer<double> derivCoef(totalQuadPoints * derivPerPoint);

		if (state->freeLatents) {
#pragma omp parallel for num_threads(numThreads)
			for (int qx=0; qx < totalQuadPoints; qx++) {
				const double *where = wherePrep + qx * maxDims;
				calcDerivCoef(fc, state, estate, icovMat.data(), where,
					      derivCoef.data() + qx * derivPerPoint);
				for (int Sgroup=0; Sgroup < numSpecific; ++Sgroup) {
					calcDerivCoef1(fc, state, estate, where, Sgroup,
						       derivCoef.data() + qx * derivPerPoint + priDerivCoef + 2 * Sgroup);
				}
			}
		}

#pragma omp parallel for num_threads(numThreads)
		for (int px=0; px < numUnique; px++) {
			if (rowSkip[px]) continue;
			int thrId = omx_absolute_thread_num();
			double *lxk = thrLxk.data() + totalQuadPoints * numSpecific * thrId;
			double *Ei = thrEi.data() + totalPrimaryPoints * thrId;
			double *Eis = thrEis.data() + totalPrimaryPoints * numSpecific * thrId;
			omxBuffer<double> expected(totalOutcomes); // can use maxOutcomes instead TODO
			std::vector<double> deriv0(thrDerivSize);
			std::vector<double> latentGrad(numLatents);
			std::vector<double> patGrad(numParam);
			double *grad = thrGrad.data() + thrId * numParam;
			double *meat = thrMeat.data() + thrId * numParam * numParam;
			estate->grp.cai2010EiEis(px, lxk, Eis, Ei);

			// Replace Eis in place by Ei/Eis (the weight applied per
			// specific group at each primary point).
			for (int qx=0, qloc = 0; qx < totalPrimaryPoints; qx++) {
				for (int sgroup=0; sgroup < numSpecific; ++sgroup) {
					Eis[qloc] = Ei[qx] / Eis[qloc];
					++qloc;
				}
			}

			// Walk primary points x specific points; qloc indexes lxk,
			// eisloc indexes Eis blocks, qx indexes full quad points.
			for (int qloc=0, eisloc=0, qx=0; eisloc < totalPrimaryPoints * numSpecific; eisloc += numSpecific) {
				for (int sx=0; sx < specificPoints; sx++) {
					mapLatentDeriv(state, estate, Eis[eisloc] * lxk[qloc],
						       derivCoef.data() + qx * derivPerPoint,
						       latentGrad.data());

					for (int Sgroup=0; Sgroup < numSpecific; Sgroup++) {
						double lxk1 = lxk[qloc];
						double Eis1 = Eis[eisloc + Sgroup];
						double tmp = Eis1 * lxk1;
						mapLatentDerivS(state, estate, Sgroup, tmp,
								derivCoef.data() + qx * derivPerPoint + priDerivCoef + 2 * Sgroup,
								latentGrad.data());

						// Items belonging to this specific group only.
						for (int ix=0; ix < numItems; ++ix) {
							if (estate->grp.Sgroup[ix] != Sgroup) continue;
							int pick = estate->grp.dataColumns[ix][rowMap[px]];
							if (pick == NA_INTEGER) continue; // missing response
							OMXZERO(expected.data(), estate->itemOutcomes(ix));
							expected[pick-1] = tmp;
							const double *spec = estate->itemSpec(ix);
							double *iparam = omxMatrixColumn(itemParam, ix);
							const int id = spec[RPF_ISpecID];
							const int dims = spec[RPF_ISpecDims];
							double *myDeriv = deriv0.data() + ix * state->itemDerivPadSize;
							const double *where = wherePrep + qx * maxDims;
							// Item sees its primary dims plus the (shared
							// last) specific dim, clamped at maxDims-1.
							Eigen::VectorXd ptheta(dims);
							for (int dx=0; dx < dims; dx++) {
								ptheta[dx] = where[std::min(dx, maxDims-1)];
							}
							(*Glibrpf_model[id].dLL1)(spec, iparam, ptheta.data(),
										  expected.data(), myDeriv);
						}
						++qloc;
					}
					++qx;
				}
			}

			// Pattern likelihood = sum of Ei over primary points.
			// If patternLik is already valid, maybe could avoid this loop TODO
			double patternLik1 = 0;
			for (int qx=0; qx < totalPrimaryPoints; ++qx) {
				patternLik1 += Ei[qx];
			}
			patternLik[px] = patternLik1;

			gradCov_finish_1pat(1 / patternLik1, rowWeight[px], numItems, numLatents, numParam,
					    state, estate, itemParam, deriv0, latentGrad, Scale,
					    patGrad, grad, meat);
		}
	}

	// Reduce the per-thread accumulators into thread 0's slice.
	for (int tx=1; tx < numThreads; ++tx) {
		double *th = thrGrad.data() + tx * numParam;
		for (size_t en=0; en < numParam; ++en) {
			thrGrad[en] += th[en];
		}
	}
	for (int tx=1; tx < numThreads; ++tx) {
		double *th = thrMeat.data() + tx * numParam * numParam;
		for (size_t en=0; en < numParam * numParam; ++en) {
			thrMeat[en] += th[en];
		}
	}

	// Publish the totals into the fit context.
	for (size_t d1=0; d1 < numParam; ++d1) {
		fc->grad(d1) += thrGrad[d1];
	}
	if (fc->infoB) {
		for (size_t d1=0; d1 < numParam; ++d1) {
			for (size_t d2=0; d2 < numParam; ++d2) {
				int cell = d1 * numParam + d2;
				fc->infoB[cell] += thrMeat[cell];
			}
		}
	}
}
// Verifies applySchurComplement() against a dense reference computation:
// builds a 5-block sparse Hessian, marginalizes out the last three blocks,
// and checks both the reduced lhs matrix (U - W V^-1 W^T with lambda damping)
// and the reduced rhs, plus the per-block back-substitution step (buildDsi).
TEST(SparseMatrixFunctionTests, testSchurComplement1)
{
  try {
    using namespace aslam::backend;
    typedef sparse_block_matrix::SparseBlockMatrix<Eigen::MatrixXd> SparseBlockMatrix;
    // Create the sparse Hessian. Two dense blocks. Three sparse.
    // Block sizes 2,2,3,3,3; partial_sum converts them to the cumulative
    // row/column boundaries the SparseBlockMatrix constructor expects.
    int structure[5] = {2, 2, 3, 3, 3};
    std::partial_sum(structure, structure + 5, structure);
    // Blocks [marginalizedStartingBlock..4] get marginalized out;
    // marginalizedStartingIndex is the corresponding scalar row index.
    int marginalizedStartingBlock = 2;
    int marginalizedStartingIndex = structure[ marginalizedStartingBlock - 1 ];
    double lambda = 1;
    SparseBlockMatrix H(structure, structure, 5, 5, true);
    Eigen::VectorXd e(H.rows());
    e.setRandom();
    Eigen::VectorXd b(H.rowBaseOfBlock(marginalizedStartingBlock));
    b.setZero();
    // A receives the reduced (Schur-complement) system: the top-left
    // marginalizedStartingBlock x marginalizedStartingBlock block slice of H.
    boost::shared_ptr<SparseBlockMatrix> A(H.slice(0, marginalizedStartingBlock, 0, marginalizedStartingBlock, true));
    ASSERT_EQ(marginalizedStartingBlock, A->bRows());
    ASSERT_EQ(marginalizedStartingBlock, A->bCols());
    A->clear(false);
    // Per-marginalized-block inverses, filled by applySchurComplement and
    // consumed later by buildDsi.
    std::vector<Eigen::MatrixXd> invVi;
    invVi.resize(H.bRows() - marginalizedStartingBlock);
    // Fill in H: random SPD diagonal blocks...
    *H.block(0, 0, true) = sm::eigen::randomCovariance<2>() * 100;
    *H.block(1, 1, true) = sm::eigen::randomCovariance<2>() * 100;
    *H.block(2, 2, true) = sm::eigen::randomCovariance<3>() * 100;
    *H.block(3, 3, true) = sm::eigen::randomCovariance<3>() * 100;
    *H.block(4, 4, true) = sm::eigen::randomCovariance<3>() * 100;
    // Start with two off diagonals.
    H.block(0, 4, true)->setRandom();
    H.block(0, 4, true)->array() *= 100;
    H.block(1, 4, true)->setRandom();
    H.block(1, 4, true)->array() *= 100;
    //std::cout << "H:\n" << H << std::endl;
    applySchurComplement(H, e, lambda, marginalizedStartingBlock, true, *A, invVi, b);

    // Dense reference: partition H into U (kept), V (marginalized), W (coupling).
    Eigen::MatrixXd Hd = H.toDense();
    Eigen::MatrixXd U = Hd.topLeftCorner(marginalizedStartingIndex, marginalizedStartingIndex);
    Eigen::MatrixXd V = Hd.bottomRightCorner(H.rows() - marginalizedStartingIndex, H.rows() - marginalizedStartingIndex);
    Eigen::MatrixXd W = Hd.topRightCorner(marginalizedStartingIndex, H.rows() - marginalizedStartingIndex);
    // Levenberg-Marquardt style damping is applied to both V and the result.
    V.diagonal().array() += lambda;
    Eigen::MatrixXd AA = U - W * V.inverse() * W.transpose();
    AA.diagonal().array() += lambda;
    Eigen::VectorXd epsSparse = e.tail(e.size() - marginalizedStartingIndex);
    Eigen::VectorXd epsDense = e.head(marginalizedStartingIndex);
    Eigen::VectorXd bb = epsDense - W * V.inverse() * epsSparse;

    {
      SCOPED_TRACE("");
      // A only stores the upper triangle; symmetrize before comparing.
      Eigen::MatrixXd Asa = A->toDense().selfadjointView<Eigen::Upper>();
      sm::eigen::assertNear(Asa, AA, 1e-12, SM_SOURCE_FILE_POS, "Testing the lhs schur complement");
    }
    {
      SCOPED_TRACE("");
      sm::eigen::assertNear(b, bb, 1e-12, SM_SOURCE_FILE_POS, "Testing the rhs schur complement");
    }

    // Let's try it again to make sure stuff gets initialized correctly.
    applySchurComplement(H, e, lambda, marginalizedStartingBlock, true, *A, invVi, b);
    {
      SCOPED_TRACE("");
      Eigen::MatrixXd Asa = A->toDense().selfadjointView<Eigen::Upper>();
      sm::eigen::assertNear(Asa, AA, 1e-12, SM_SOURCE_FILE_POS, "Testing the lhs schur complement");
    }
    {
      SCOPED_TRACE("");
      sm::eigen::assertNear(b, bb, 1e-12, SM_SOURCE_FILE_POS, "Testing the rhs schur complement");
    }

    // Now we check the update function: recover the marginalized increments
    // ds = V^-1 (epsSparse - W^T dx) and compare buildDsi's per-block output
    // against the corresponding dense segment.
    Eigen::VectorXd dx(marginalizedStartingIndex);
    dx.setRandom();
    Eigen::VectorXd denseDs = V.inverse() * (epsSparse - W.transpose() * dx);
    for (int i = 0; i < H.bRows() - marginalizedStartingBlock; ++i) {
      Eigen::VectorXd outDsi;
      buildDsi(i, H, e, marginalizedStartingBlock, invVi[i], dx, outDsi);
      Eigen::VectorXd dsi = denseDs.segment(H.rowBaseOfBlock(i + marginalizedStartingBlock) - marginalizedStartingIndex,
                                            H.rowsOfBlock(i + marginalizedStartingBlock));
      sm::eigen::assertNear(outDsi, dsi, 1e-12, SM_SOURCE_FILE_POS, "Checking the update step calculation");
    }
  } catch (const std::exception& e) {
    FAIL() << "Exception: " << e.what();
  }
}