void Optimizer::augment_sparse_linear_system(SparseSystem& W,
    const Properties& prop) {
  if (prop.method == DOG_LEG) {
    // We're using the incremental version of Powell's Dog-Leg, so we need
    // to form the updated gradient.
    const VectorXd& f_new = W.rhs();

    // Add the new measurements' contribution to the running sum of squared
    // errors at the current linearization point.
    current_SSE_at_linpoint += f_new.squaredNorm();

    // Allocate the new gradient vector
    VectorXd g_new(W.num_cols());

    // Compute W^T \cdot f_new
    VectorXd increment = mul_SparseMatrixTrans_Vector(W, f_new);

    // Set g_new = [g_old; 0] + W^T f_new, padding the old gradient with zeros
    // for the newly introduced variables.
    g_new.head(gradient.size()) = gradient + increment.head(gradient.size());
    g_new.tail(W.num_cols() - gradient.size()) = increment.tail(
        W.num_cols() - gradient.size());

    // Cache the new gradient vector
    gradient = g_new;
  }

  // Apply Givens rotations to QR-factorize the newly augmented sparse system.
  for (int i = 0; i < W.num_rows(); i++) {
    SparseVector new_row = W.get_row(i);
    function_system._R.add_row_givens(new_row, W.rhs()(i));
  }
}
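// The identity behind the incremental gradient update above can be checked in
// isolation. A minimal sketch (plain Eigen, made-up sizes, hypothetical helper
// name; not part of the library): stacking new rows W with residual f_new
// under an old system A with residual f gives the gradient
// [A; W]^T [f; f_new] = [g_old; 0] + W^T f_new.
#include <Eigen/Dense>
#include <iostream>

void check_incremental_gradient() {
  using namespace Eigen;
  MatrixXd A = MatrixXd::Random(6, 3);  // old rows over 3 old variables
  MatrixXd W(4, 5);                     // new rows over 3 old + 2 new variables
  W << MatrixXd::Random(4, 3), MatrixXd::Random(4, 2);
  VectorXd f = VectorXd::Random(6), f_new = VectorXd::Random(4);

  VectorXd g_old = A.transpose() * f;

  // Incremental form: pad g_old with zeros for the new columns, add W^T f_new.
  VectorXd g_inc = VectorXd::Zero(5);
  g_inc.head(3) = g_old;
  g_inc += W.transpose() * f_new;

  // Batch recomputation of the same gradient for comparison.
  MatrixXd J(10, 5);
  J << A, MatrixXd::Zero(6, 2), W;
  VectorXd r(10);
  r << f, f_new;
  VectorXd g_batch = J.transpose() * r;

  std::cout << (g_inc - g_batch).norm() << std::endl;  // prints ~0
}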
// Keep the poses at start_t and end_t, but get rid of all of the ones in between.
// TODO Don't like how this function is written; ideally it would take a list of times or feature ids to be removed.
// TODO Currently returning the marginal information; should this change?
// TODO For cooperative localization work this should work with no landmarks at all.
// TODO Should we make an LCM type for a dense factor?
MatrixXd FactorGraph::marginalize_poses_dense(int64_t start_t, int64_t end_t, int id) {
  
  cout << "1: " << time_in_ms() << endl;

  // step 1: get the measurement Jacobian (the information matrix is formed from it in step 3):
  SparseSystem Js = _slam.jacobian();
  MatrixXd J(Js.num_rows(), Js.num_cols());
  for (int r=0; r<Js.num_rows(); r++) {
    for (int c=0; c<Js.num_cols(); c++) {
      J(r,c) = Js(r,c);
    }
  }

  cout << "2: " << time_in_ms() << endl;

  // step 2: reorder the Jacobian columns so the variables being kept come first.
  // TODO Really should go through and find the Markov blanket explicitly. For now
  // I just know that it's all landmarks as well as the start and end poses.

  Pose3dTS_Node* start_node = find_pose_from_time_and_id(start_t, id);
  Pose3dTS_Node* end_node   = find_pose_from_time_and_id(end_t,   id);

  MatrixXd J2(J.rows(),J.cols());

  int current_col = 0;
  // first and last pose
  for (int i=0; i<start_node->dim(); i++) {
    J2.col(i) = J.col(start_node->start() + i);
    J2.col(i+start_node->dim()) = J.col(end_node->start() + i);
  }
  current_col += start_node->dim() + end_node->dim();

  // landmarks:
  for (size_t l=0; l<_features.size(); l++) {
    for (int i=0; i<_features[l]->dim(); i++) {
      J2.col(current_col+i) = J.col(_features[l]->start()+i);
    }
    current_col += _features[l]->dim();
  }

  // intermediate poses
  Pose3dTS_Node* next_node = find_next_pose_from_pose(start_node);
  while (next_node != end_node) {
    for (int i=0; i<next_node->dim(); i++) {
      J2.col(current_col+i) = J.col(next_node->start() + i);
    }
    current_col += next_node->dim();
    next_node = find_next_pose_from_pose(next_node);
  }

  cout << "3: " << time_in_ms() << endl;

  // step 3: form the information matrix at the current linearization point.
  MatrixXd H = J2.transpose() * J2;
  //  ofstream myfile;
  //  myfile.open("H.txt");
  //  myfile << H;
  //  myfile.close();


  cout << "4: " << time_in_ms() << endl;
  

  // step 4: Schur complement over the intermediate poses.
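  // Block sizes: the kept block is the two 6-DoF poses (12) plus 3 DoF per
  // landmark; the marginalized block is the 6*(num_poses - 2) intermediate
  // pose variables, matching the column order built into J2 above.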
  const int keep_dim = 12 + _features.size()*3;
  const int marg_dim = 6*(_poses[id].size() - 2);
  MatrixXd A = H.topLeftCorner(keep_dim, keep_dim);
  MatrixXd B = H.topRightCorner(keep_dim, marg_dim);
  MatrixXd C = H.bottomLeftCorner(marg_dim, keep_dim);
  MatrixXd D = H.bottomRightCorner(marg_dim, marg_dim);

  MatrixXd H_m = A - B * D.inverse() * C;

  cout << "5: " << time_in_ms() << endl;

  // TODO For the dense case we should build the constraint here and add it, as
  // well as remove all of the poses being marginalized (including the factors,
  // which I think happens automatically). Maybe look at how Nick C-B does the
  // dense constraints... it's a bit tricky to make general and I'm not sure I
  // really need it. Might be able to directly use the GLC formulation.

  return H_m;

}
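// The Schur complement in step 4 above can be sanity-checked on a toy problem.
// A small standalone sketch (plain Eigen, arbitrary block sizes, hypothetical
// helper name; not part of the library): marginalizing the second block of
// variables out of an information matrix H = [A B; C D] leaves
// H_m = A - B * D^{-1} * C on the first block, and H_m^{-1} must equal the
// corresponding block of the full covariance H^{-1}.
#include <Eigen/Dense>
#include <iostream>

void check_schur_marginalization() {
  using namespace Eigen;
  // Build a well-conditioned information matrix over 5 variables.
  MatrixXd J = MatrixXd::Random(8, 5);
  MatrixXd H = J.transpose() * J + 0.1 * MatrixXd::Identity(5, 5);

  // Keep the first 3 variables; marginalize out the last 2.
  MatrixXd A = H.topLeftCorner(3, 3);
  MatrixXd B = H.topRightCorner(3, 2);
  MatrixXd C = H.bottomLeftCorner(2, 3);
  MatrixXd D = H.bottomRightCorner(2, 2);
  MatrixXd H_m = A - B * D.inverse() * C;

  // Marginal covariance of the kept variables, computed both ways.
  MatrixXd H_inv = H.inverse();
  MatrixXd cov_kept = H_inv.topLeftCorner(3, 3);
  MatrixXd H_m_inv = H_m.inverse();
  std::cout << (H_m_inv - cov_kept).norm() << std::endl;  // prints ~0
}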
void Optimizer::relinearize(const Properties& prop) {
  // We're going to relinearize about the current estimate.
  function_system.estimate_to_linpoint();

  // prepare factorization
  SparseSystem jac = function_system.jacobian();

  // factorization and new rhs based on new linearization point will be in _R
  VectorXd h_gn = compute_gauss_newton_step(jac, &function_system._R); // modifies _R

  if (prop.method == DOG_LEG) {
    // Compute the gradient and cache it.
    gradient = mul_SparseMatrixTrans_Vector(jac, jac.rhs());

    // Get the value of the sum-of-squared errors at the current linearization point.
    current_SSE_at_linpoint = jac.rhs().squaredNorm();

    // NB: alpha's denominator will be zero iff the gradient vector is zero
    // (since J is full-rank by hypothesis).  But the gradient is zero iff
    // we're already at the minimum, so we don't actually need to do any
    // updates; just set the estimate to be the current linearization point.

    double alpha_denominator = (jac * gradient).squaredNorm();

    if (alpha_denominator > 0) {
      double alpha_numerator = gradient.squaredNorm();
      double alpha = alpha_numerator / alpha_denominator;
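      // alpha is the step length minimizing the quadratic model along the
      // steepest-descent direction: alpha = ||g||^2 / ||J g||^2.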

      // These values will be used to update the estimate of Delta
      double F_0, F_h;

      F_0 = current_SSE_at_linpoint;

      double rho_denominator, rho;

      VectorXd h_dl;

      do {
        // We repeat the computation of the dog-leg step, shrinking the
        // trust-region radius if necessary, until we generate a sufficiently
        // small region of trust that we accept the proposed step.

        // Compute dog-leg step.
        // NOTE: Here we use -h_gn because of the weird sign change in the exmap functions.
        h_dl = compute_dog_leg(alpha, -gradient, -h_gn, Delta, rho_denominator);

        // Update the estimate.
        // NOTE:  Here we use -h_dl because of the weird sign change in the exmap functions.
        function_system.apply_exmap(-h_dl);

        // Get the value of the sum-of-squared errors at the new estimate.
        F_h = function_system.weighted_errors(ESTIMATE).squaredNorm();

        // Compute gain ratio.
        rho = (F_0 - F_h) / (rho_denominator);
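        // rho compares the actual error reduction F_0 - F_h to the reduction
        // predicted by the linearized model; rho near 1 means the model fits.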

        update_trust_radius(rho, h_dl.norm());
      } while (rho < 0);

      // Cache last accepted dog-leg step
      last_accepted_hdl = h_dl;
    } else {
      function_system.linpoint_to_estimate();
      last_accepted_hdl = VectorXd::Zero(gradient.size());
    }

  } else {
    // For Gauss-Newton just apply the update directly.
    function_system.apply_exmap(h_gn);
  }
}
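// compute_dog_leg itself is not shown above; here is a compact sketch of the
// standard way the dog-leg step is chosen (following Powell's method as the
// comments above describe it; the free-function name and signature are
// hypothetical, and the predicted-reduction output of the real routine is
// omitted). g is the descent direction, matching the -gradient passed above,
// h_gn is the Gauss-Newton step, and Delta is the trust-region radius.
#include <Eigen/Dense>
#include <cmath>

Eigen::VectorXd dog_leg_step(double alpha, const Eigen::VectorXd& g,
                             const Eigen::VectorXd& h_gn, double Delta) {
  if (h_gn.norm() <= Delta) {
    return h_gn;  // the full Gauss-Newton step fits in the trust region
  }
  Eigen::VectorXd h_sd = alpha * g;  // Cauchy point along steepest descent
  if (h_sd.norm() >= Delta) {
    return (Delta / h_sd.norm()) * h_sd;  // even the Cauchy point is too long
  }
  // Otherwise walk from h_sd toward h_gn until hitting the boundary: solve
  // ||h_sd + beta * (h_gn - h_sd)|| = Delta for beta in [0, 1].
  Eigen::VectorXd d = h_gn - h_sd;
  double a = d.squaredNorm();
  double b = 2.0 * h_sd.dot(d);
  double c = h_sd.squaredNorm() - Delta * Delta;
  double beta = (-b + std::sqrt(b * b - 4.0 * a * c)) / (2.0 * a);
  return h_sd + beta * d;
}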
#include <isam/isam.h>

using namespace std;
using namespace isam;
using namespace Eigen;

int main(int argc, const char* argv[]) {
  Slam slam; // the graph that the nodes and factors below are added to
  Pose2d origin;
  Noise noise = SqrtInformation(10. * eye(3));
  Pose2d_Node* pose_node_1 = new Pose2d_Node(); // create node
  slam.add_node(pose_node_1); // add it to the Slam graph
  Pose2d_Factor* prior = new Pose2d_Factor(pose_node_1, origin, noise); // create prior measurement, a factor
  slam.add_factor(prior); // add it to the Slam graph

  Pose2d_Node* pose_node_2 = new Pose2d_Node(); // create node
  slam.add_node(pose_node_2); // add it to the Slam graph

  Pose2d delta(1., 0., 0.);
  Pose2d_Pose2d_Factor* odo = new Pose2d_Pose2d_Factor(pose_node_1, pose_node_2, delta, noise);
  slam.add_factor(odo);

  slam.batch_optimization();

#if 0
  const Covariances& covariances = slam.covariances();
#else
  // operate on a copy (just an example, cloning is useful for running
  // covariance recovery in a separate thread)
  const Covariances& covariances = slam.covariances().clone();
#endif

  // recovering the full covariance matrix
  cout << "Full covariance matrix:" << endl;
  MatrixXd cov_full = covariances.marginal(slam.get_nodes());
  cout << cov_full << endl << endl;

  // sanity checking by inverting the information matrix, not using R
  SparseSystem Js = slam.jacobian();
  MatrixXd J(Js.num_rows(), Js.num_cols());
  for (int r=0; r<Js.num_rows(); r++) {
    for (int c=0; c<Js.num_cols(); c++) {
      J(r,c) = Js(r,c);
    }
  }
  MatrixXd H = J.transpose() * J;
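  // The full covariance is the inverse of the information matrix H = J^T J,
  // so cov_full2 should match cov_full recovered above.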
  MatrixXd cov_full2 = H.inverse();
  cout << cov_full2 << endl;

  // recovering the block-diagonals only of the full covariance matrix
  cout << "Block-diagonals only:" << endl;
  Covariances::node_lists_t node_lists;
  list<Node*> nodes;
  nodes.push_back(pose_node_1);
  node_lists.push_back(nodes);
  nodes.clear();
  nodes.push_back(pose_node_2);
  node_lists.push_back(nodes);
  list<MatrixXd> cov_blocks = covariances.marginal(node_lists);
  int i = 1;
  for (list<MatrixXd>::iterator it = cov_blocks.begin(); it!=cov_blocks.end(); it++, i++) {
    cout << "block " << i << endl;
    cout << *it << endl;
  }

  // recovering individual entries, here the right block column
  cout << "Right block column:" << endl;
  Covariances::node_pair_list_t node_pair_list;
  node_pair_list.push_back(make_pair(pose_node_1, pose_node_2));
  node_pair_list.push_back(make_pair(pose_node_2, pose_node_2));
  list<MatrixXd> cov_entries = covariances.access(node_pair_list);
  for (list<MatrixXd>::iterator it = cov_entries.begin(); it!=cov_entries.end(); it++) {
    cout << *it << endl;
  }
}