Code example #1
File: GaussianConditional.cpp  Project: haidai/gtsam
  /* ************************************************************************* */
  VectorValues GaussianConditional::solveOtherRHS(
    const VectorValues& parents, const VectorValues& rhs) const
  {
    // Concatenate all vector values that correspond to parent variables
    Vector xS = parents.vector(FastVector<Key>(beginParents(), endParents()));

    // Instead of updating getb(), update the right-hand-side from the given rhs
    const Vector rhsR = rhs.vector(FastVector<Key>(beginFrontals(), endFrontals()));
    xS = rhsR - get_S() * xS;

    // Solve Matrix
    Vector soln = get_R().triangularView<Eigen::Upper>().solve(xS);

    // Scale by sigmas
    if(model_)
      soln.array() *= model_->sigmas().array();

    // Insert solution into a VectorValues
    VectorValues result;
    DenseIndex vectorPosition = 0;
    for(const_iterator frontal = beginFrontals(); frontal != endFrontals(); ++frontal) {
      result.insert(*frontal, soln.segment(vectorPosition, getDim(frontal)));
      vectorPosition += getDim(frontal);
    }

    return result;
  }
Code example #2
/* ************************************************************************* */
TEST(DoglegOptimizer, ComputeBlend) {
  // Create an arbitrary Bayes Net
  GaussianBayesNet gbn;
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      0, Vector2(1.0,2.0), (Matrix(2, 2) << 3.0,4.0,0.0,6.0).finished(),
      3, (Matrix(2, 2) << 7.0,8.0,9.0,10.0).finished(),
      4, (Matrix(2, 2) << 11.0,12.0,13.0,14.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      1, Vector2(15.0,16.0), (Matrix(2, 2) << 17.0,18.0,0.0,20.0).finished(),
      2, (Matrix(2, 2) << 21.0,22.0,23.0,24.0).finished(),
      4, (Matrix(2, 2) << 25.0,26.0,27.0,28.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      2, Vector2(29.0,30.0), (Matrix(2, 2) << 31.0,32.0,0.0,34.0).finished(),
      3, (Matrix(2, 2) << 35.0,36.0,37.0,38.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      3, Vector2(39.0,40.0), (Matrix(2, 2) << 41.0,42.0,0.0,44.0).finished(),
      4, (Matrix(2, 2) << 45.0,46.0,47.0,48.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
      4, Vector2(49.0,50.0), (Matrix(2, 2) << 51.0,52.0,0.0,54.0).finished()));

  // Compute steepest descent point
  VectorValues xu = gbn.optimizeGradientSearch();

  // Compute Newton's method point
  VectorValues xn = gbn.optimize();

  // The Newton's method point should be more "adventurous", i.e. larger, than the steepest descent point
  EXPECT(xu.vector().norm() < xn.vector().norm());

  // Compute blend
  double Delta = 1.5;
  VectorValues xb = DoglegOptimizerImpl::ComputeBlend(Delta, xu, xn);
  DOUBLES_EQUAL(Delta, xb.vector().norm(), 1e-10);
}
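
For orientation: ComputeBlend returns the point on the segment between the steepest-descent point xu and the Newton point xn whose norm equals the trust-region radius Delta, which is why the test checks that norm directly. Below is a minimal standalone sketch of that computation on plain Eigen vectors; the function name computeBlendSketch and the quadratic-formula details are illustrative assumptions, not GTSAM's implementation.

#include <Eigen/Dense>
#include <cmath>

// Sketch: solve ||xu + tau*(xn - xu)|| = Delta for tau and return the blended point.
Eigen::VectorXd computeBlendSketch(double Delta, const Eigen::VectorXd& xu,
                                   const Eigen::VectorXd& xn) {
  const Eigen::VectorXd d = xn - xu;
  // ||xu + tau*d||^2 = Delta^2  =>  a*tau^2 + b*tau + c = 0
  const double a = d.squaredNorm();
  const double b = 2.0 * xu.dot(d);
  const double c = xu.squaredNorm() - Delta * Delta;
  const double tau = (-b + std::sqrt(b * b - 4.0 * a * c)) / (2.0 * a);  // root in [0, 1]
  return xu + tau * d;
}
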
Code example #3
File: HessianFactor.cpp  Project: gburachas/gtsam_pcl
/* ************************************************************************* */
double HessianFactor::error(const VectorValues& c) const {
  // error = 0.5*(f - 2*x'*g + x'*G*x)
  const double f = constantTerm();
  const double xtg = c.vector().dot(linearTerm());
  const double xGx = c.vector().transpose() *
      info_.range(0, this->size(), 0, this->size()).selfadjointView<Eigen::Upper>() *
      c.vector();

  return 0.5 * (f - 2.0 * xtg + xGx);
}
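
The comment above spells out the quadratic form being evaluated. As a sanity check, the same expression can be computed on small dense matrices with plain Eigen; the numbers below are arbitrary and the snippet only illustrates the formula, it is not part of HessianFactor.

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d G;
  G << 2.0, 0.5,
       0.5, 1.0;                    // symmetric quadratic term G
  Eigen::Vector2d g(1.0, -1.0);     // linear term g
  double f = 3.0;                   // constant term f
  Eigen::Vector2d x(0.5, 2.0);      // evaluation point x

  // 0.5*(f - 2*x'*g + x'*G*x) = 0.5*(3.0 - 2*(-1.5) + 5.5) = 5.75
  double error = 0.5 * (f - 2.0 * x.dot(g) + x.dot(G * x));
  std::cout << "error = " << error << std::endl;
  return 0;
}
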
Code example #4
File: GaussianConditional.cpp  Project: haidai/gtsam
  /* ************************************************************************* */
  VectorValues GaussianConditional::solve(const VectorValues& x) const
  {
    // Concatenate all vector values that correspond to parent variables
    const Vector xS = x.vector(FastVector<Key>(beginParents(), endParents()));

    // Update right-hand-side
    const Vector rhs = get_d() - get_S() * xS;

    // Solve matrix
    const Vector solution = get_R().triangularView<Eigen::Upper>().solve(rhs);

    // Check for indeterminate solution
    if (solution.hasNaN()) {
      throw IndeterminantLinearSystemException(keys().front());
    }

    // Insert solution into a VectorValues
    VectorValues result;
    DenseIndex vectorPosition = 0;
    for (const_iterator frontal = beginFrontals(); frontal != endFrontals(); ++frontal) {
      result.insert(*frontal, solution.segment(vectorPosition, getDim(frontal)));
      vectorPosition += getDim(frontal);
    }

    return result;
  }
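
The heart of solve() is the back-substitution R*x = d - S*xS against the upper-triangular R block. A minimal standalone illustration of that triangularView solve, with arbitrary numbers and no GTSAM types, looks like this:

#include <Eigen/Dense>
#include <iostream>

int main() {
  Eigen::Matrix2d R;
  R << 2.0, 1.0,
       0.0, 3.0;                    // upper-triangular R
  Eigen::Vector2d rhs(4.0, 6.0);    // stands in for d - S*xS

  // Back-substitution: x(1) = 6/3 = 2, x(0) = (4 - 1*2)/2 = 1
  Eigen::Vector2d x = R.triangularView<Eigen::Upper>().solve(rhs);
  std::cout << x.transpose() << std::endl;  // prints "1 2"
  return 0;
}
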
Code example #5
/* ************************************************************************* */
TEST(GaussianBayesNet, ComputeSteepestDescentPoint) {

  // Create an arbitrary Bayes Net
  GaussianBayesNet gbn;
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
    0, Vector2(1.0,2.0), (Matrix(2, 2) << 3.0,4.0,0.0,6.0).finished(),
    3, (Matrix(2, 2) << 7.0,8.0,9.0,10.0).finished(),
    4, (Matrix(2, 2) << 11.0,12.0,13.0,14.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
    1, Vector2(15.0,16.0), (Matrix(2, 2) << 17.0,18.0,0.0,20.0).finished(),
    2, (Matrix(2, 2) << 21.0,22.0,23.0,24.0).finished(),
    4, (Matrix(2, 2) << 25.0,26.0,27.0,28.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
    2, Vector2(29.0,30.0), (Matrix(2, 2) << 31.0,32.0,0.0,34.0).finished(),
    3, (Matrix(2, 2) << 35.0,36.0,37.0,38.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
    3, Vector2(39.0,40.0), (Matrix(2, 2) << 41.0,42.0,0.0,44.0).finished(),
    4, (Matrix(2, 2) << 45.0,46.0,47.0,48.0).finished()));
  gbn += GaussianConditional::shared_ptr(new GaussianConditional(
    4, Vector2(49.0,50.0), (Matrix(2, 2) << 51.0,52.0,0.0,54.0).finished()));

  // Compute the Hessian numerically
  Matrix hessian = numericalHessian<Vector10>(
      boost::bind(&computeError, gbn, _1), Vector10::Zero());

  // Compute the gradient numerically
  Vector gradient = numericalGradient<Vector10>(
      boost::bind(&computeError, gbn, _1), Vector10::Zero());

  // Compute the gradient using dense matrices
  Matrix augmentedHessian = GaussianFactorGraph(gbn).augmentedHessian();
  LONGS_EQUAL(11, (long)augmentedHessian.cols());
  Vector denseMatrixGradient = -augmentedHessian.col(10).segment(0,10);
  EXPECT(assert_equal(gradient, denseMatrixGradient, 1e-5));

  // Compute the steepest descent point
  double step = -gradient.squaredNorm() / (gradient.transpose() * hessian * gradient)(0);
  Vector expected = gradient * step;

  // Compute the steepest descent point with the dogleg function
  VectorValues actual = gbn.optimizeGradientSearch();

  // Check that points agree
  FastVector<Key> keys = list_of(0)(1)(2)(3)(4);
  Vector actualAsVector = actual.vector(keys);
  EXPECT(assert_equal(expected, actualAsVector, 1e-5));

  // Check that point causes a decrease in error
  double origError = GaussianFactorGraph(gbn).error(VectorValues::Zero(actual));
  double newError = GaussianFactorGraph(gbn).error(actual);
  EXPECT(newError < origError);
}
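
A note on the closed-form step used above: along the line x(t) = t*g, the quadratic model e(0) + t*g'*g + 0.5*t^2*g'*H*g is minimized at t = -g'*g / (g'*H*g), which is exactly the step used to scale the gradient into expected. This is the standard Cauchy (steepest-descent) point from trust-region theory; the test only checks that optimizeGradientSearch reproduces the same point.
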
Code example #6
File: testHessianFactor.cpp  Project: DForger/gtsam
/* ************************************************************************* */
TEST(HessianFactor, gradientAtZero)
{
  Matrix G11 = (Matrix(1, 1) << 1).finished();
  Matrix G12 = (Matrix(1, 2) << 0, 0).finished();
  Matrix G22 = (Matrix(2, 2) << 1, 0, 0, 1).finished();
  Vector g1 = (Vector(1) << -7).finished();
  Vector g2 = (Vector(2) << -8, -9).finished();
  double f = 194;

  HessianFactor factor(0, 1, G11, G12, g1, G22, g2, f);

  // test gradient at zero
  VectorValues expectedG = pair_list_of<Key, Vector>(0, -g1) (1, -g2);
  Matrix A; Vector b; boost::tie(A,b) = factor.jacobian();
  FastVector<Key> keys; keys += 0,1;
  EXPECT(assert_equal(-A.transpose()*b, expectedG.vector(keys)));
  VectorValues actualG = factor.gradientAtZero();
  EXPECT(assert_equal(expectedG, actualG));
}
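
For the quadratic error 0.5*(f - 2*x'*g + x'*G*x) (see example #3), the gradient is G*x - g, so at x = 0 it reduces to -g; that is why expectedG is built from -g1 and -g2, and why it must also equal -A'*b for the equivalent Jacobian form obtained from factor.jacobian(). This reading of the test is inferred from the formula in example #3, not quoted from GTSAM's documentation.
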
Code example #7
/* ************************************************************************* */
TEST(VectorValues, assignment) {

  VectorValues actual;

  {
    // insert, with out-of-order indices
    VectorValues original;
    original.insert(0, Vector_(1, 1.0));
    original.insert(1, Vector_(2, 2.0, 3.0));
    original.insert(5, Vector_(2, 6.0, 7.0));
    original.insert(2, Vector_(2, 4.0, 5.0));
    actual = original;
  }

  // Check dimensions
  LONGS_EQUAL(6, actual.size());
  LONGS_EQUAL(7, actual.dim());
  LONGS_EQUAL(1, actual.dim(0));
  LONGS_EQUAL(2, actual.dim(1));
  LONGS_EQUAL(2, actual.dim(2));
  LONGS_EQUAL(2, actual.dim(5));

  // Logic
  EXPECT(actual.exists(0));
  EXPECT(actual.exists(1));
  EXPECT(actual.exists(2));
  EXPECT(!actual.exists(3));
  EXPECT(!actual.exists(4));
  EXPECT(actual.exists(5));
  EXPECT(!actual.exists(6));

  // Check values
  EXPECT(assert_equal(Vector_(1, 1.0), actual[0]));
  EXPECT(assert_equal(Vector_(2, 2.0, 3.0), actual[1]));
  EXPECT(assert_equal(Vector_(2, 4.0, 5.0), actual[2]));
  EXPECT(assert_equal(Vector_(2, 6.0, 7.0), actual[5]));
  EXPECT(assert_equal(Vector_(7, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0), actual.vector()));

  // Check exceptions
  CHECK_EXCEPTION(actual.insert(1, Vector()), invalid_argument);
}
Code example #8
File: GaussianConditional.cpp  Project: haidai/gtsam
  /* ************************************************************************* */
  void GaussianConditional::solveTransposeInPlace(VectorValues& gy) const
  {
    Vector frontalVec = gy.vector(FastVector<Key>(beginFrontals(), endFrontals()));
    frontalVec = gtsam::backSubstituteUpper(frontalVec, Matrix(get_R()));

    // Check for indeterminate solution
    if (frontalVec.hasNaN()) throw IndeterminantLinearSystemException(this->keys().front());

    for (const_iterator it = beginParents(); it!= endParents(); it++)
      gy[*it] += -1.0 * Matrix(getA(it)).transpose() * frontalVec;

    // Scale by sigmas
    if(model_)
      frontalVec.array() *= model_->sigmas().array();

    // Write frontal solution into a VectorValues
    DenseIndex vectorPosition = 0;
    for(const_iterator frontal = beginFrontals(); frontal != endFrontals(); ++frontal) {
      gy[*frontal] = frontalVec.segment(vectorPosition, getDim(frontal));
      vectorPosition += getDim(frontal);
    }
  }
Code example #9
/* ************************************************************************* */
TEST( testLinearContainerFactor, hessian_factor_withlinpoints ) {
  // 2 variable example, one pose, one landmark (planar)
  // Initial ordering: x1, l1

  Matrix G11 = (Matrix(3, 3) <<
      1.0, 2.0, 3.0,
      0.0, 5.0, 6.0,
      0.0, 0.0, 9.0).finished();
  Matrix G12 = (Matrix(3, 2) <<
      1.0, 2.0,
      3.0, 5.0,
      4.0, 6.0).finished();
  Vector g1 = (Vector(3) << 1.0,  2.0,  3.0).finished();

  Matrix G22 = (Matrix(2, 2) <<
        0.5, 0.2,
        0.0, 0.6).finished();

  Vector g2 = (Vector(2) << -8.0, -9.0).finished();

  double f = 10.0;

  // Construct full matrices
  Matrix G(5,5);
  G << G11, G12, Matrix::Zero(2,3), G22;

  HessianFactor initFactor(x1, l1, G11, G12, g1, G22, g2, f);

  Values linearizationPoint, expLinPoints;
  linearizationPoint.insert(l1, landmark1);
  linearizationPoint.insert(x1, poseA1);
  expLinPoints = linearizationPoint;
  linearizationPoint.insert(x2, poseA2);

  LinearContainerFactor actFactor(initFactor, linearizationPoint);
  EXPECT(!actFactor.isJacobian());
  EXPECT(actFactor.isHessian());

  EXPECT(actFactor.hasLinearizationPoint());
  Values actLinPoint = *actFactor.linearizationPoint();
  EXPECT(assert_equal(expLinPoints, actLinPoint));

  // Create delta
  Vector delta_l1 = (Vector(2) << 1.0, 2.0).finished();
  Vector delta_x1 = (Vector(3) << 3.0, 4.0, 0.5).finished();
  Vector delta_x2 = (Vector(3) << 6.0, 7.0, 0.3).finished();

  // Check error calculation
  VectorValues delta = linearizationPoint.zeroVectors();
  delta.at(l1) = delta_l1;
  delta.at(x1) = delta_x1;
  delta.at(x2) = delta_x2;
  EXPECT(assert_equal((Vector(5) << 3.0, 4.0, 0.5, 1.0, 2.0).finished(), delta.vector(initFactor.keys())));
  Values noisyValues = linearizationPoint.retract(delta);

  double expError = initFactor.error(delta);
  EXPECT_DOUBLES_EQUAL(expError, actFactor.error(noisyValues), tol);
  EXPECT_DOUBLES_EQUAL(initFactor.error(linearizationPoint.zeroVectors()), actFactor.error(linearizationPoint), tol);

  // Compute updated versions
  Vector dv = (Vector(5) << 3.0, 4.0, 0.5, 1.0, 2.0).finished();
  Vector g(5); g << g1, g2;
  Vector g_prime = g - G.selfadjointView<Eigen::Upper>() * dv;

  // Check linearization with corrections for updated linearization point
  Vector g1_prime = g_prime.head(3);
  Vector g2_prime = g_prime.tail(2);
  double f_prime = f + dv.transpose() * G.selfadjointView<Eigen::Upper>() * dv - 2.0 * dv.transpose() * g;
  HessianFactor expNewFactor(x1, l1, G11, G12, g1_prime, G22, g2_prime, f_prime);
  EXPECT(assert_equal(*expNewFactor.clone(), *actFactor.linearize(noisyValues), tol));
}
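
The corrected terms used for expNewFactor follow from substituting x = dv + x' into the quadratic error 0.5*(f - 2*x'*g + x'*G*x) from example #3 and collecting terms in x': the linear term becomes g' = g - G*dv and the constant becomes f' = f + dv'*G*dv - 2*dv'*g, which is exactly how g_prime and f_prime are formed above. This is a sketch of the algebra behind the expected factor, not a quote from the GTSAM sources.
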