Example #1
Covariances::Covariances(Slam& slam) : _slam(NULL), _R(slam._R) {
  // update pointers into matrix before making a copy
  slam.update_starts();
  const list<Node*>& nodes = slam.get_nodes();
  for (list<Node*>::const_iterator it = nodes.begin(); it!=nodes.end(); it++) {
    Node* node = *it;
    int start = node->_start;
    int dim = node->dim();
    _index[node] = make_pair(start, dim);
  }  
}
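The (start, dim) pairs stored in _index above record where each node's variables live inside the factorization. As a hedged illustration (the helper below is hypothetical, not part of the library, and assumes the usual isam/Eigen includes and usings), such an index can be used to cut a node's diagonal block out of a dense covariance matrix:

// Hypothetical helper: extract a node's diagonal block from a dense
// covariance matrix, given an index that maps each node to (start, dim).
MatrixXd node_block(const MatrixXd& cov,
                    const map<Node*, pair<int, int> >& index, Node* node) {
  pair<int, int> entry = index.find(node)->second; // assumes the node is present
  return cov.block(entry.first, entry.first, entry.second, entry.second);
}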
Example #2
#include <isam/isam.h>

using namespace std;
using namespace isam;

int main() {
  // instance of the main class that manages and optimizes the pose graph
  Slam slam;

  // locally remember poses
  vector<Pose2d_Node*> pose_nodes;

  Noise noise3 = Information(100. * eye(3));
  Noise noise2 = Information(100. * eye(2));

  // create a first pose (a node)
  Pose2d_Node* new_pose_node = new Pose2d_Node();
  // add it to the graph
  slam.add_node(new_pose_node);
  // also remember it locally
  pose_nodes.push_back(new_pose_node);

  // create a prior measurement (a factor)
  Pose2d origin(0., 0., 0.);
  Pose2d_Factor* prior = new Pose2d_Factor(pose_nodes[0], origin, noise3);
  // add it to the graph
  slam.add_factor(prior);

  for (int i=1; i<4; i++) {
    // next pose
    Pose2d_Node* new_pose_node = new Pose2d_Node();
    slam.add_node(new_pose_node);
    pose_nodes.push_back(new_pose_node);

    // connect to previous with odometry measurement
    Pose2d odometry(1., 0., 0.); // x,y,theta
    Pose2d_Pose2d_Factor* constraint = new Pose2d_Pose2d_Factor(pose_nodes[i-1], pose_nodes[i], odometry, noise3);
    slam.add_factor(constraint);
  }

  // create a landmark
  Point2d_Node* new_point_node = new Point2d_Node();
  slam.add_node(new_point_node);

  // connect the pose and the landmark by a measurement
  Point2d measure(5., 3.); // x,y
  Pose2d_Point2d_Factor* measurement =
    new Pose2d_Point2d_Factor(pose_nodes[1], new_point_node, measure, noise2);
  slam.add_factor(measurement);

  // optimize the graph
  slam.batch_optimization();

  // accessing the current estimate of a specific pose
  cout << "Pose 2: " << pose_nodes[2]->value() << endl;

  // printing the complete graph
  cout << endl << "Full graph:" << endl;
  slam.write(cout);

  return 0;
}
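The example allocates nodes and factors on the heap and never frees them, which is harmless in a short program that exits right away. Below is a minimal cleanup sketch, assuming the same includes as above and assuming that Slam does not take ownership of the pointers passed to add_node()/add_factor() (an assumption, not something the example states):

// Hypothetical cleanup, assuming the graph does not own its nodes and factors.
// Call only when the graph is no longer needed, e.g. right before returning.
void delete_graph_contents(Slam& slam) {
  const list<Factor*>& factors = slam.get_factors();
  for (list<Factor*>::const_iterator it = factors.begin(); it != factors.end(); it++) {
    delete *it; // factors first, since they refer to nodes
  }
  const list<Node*>& nodes = slam.get_nodes();
  for (list<Node*>::const_iterator it = nodes.begin(); it != nodes.end(); it++) {
    delete *it;
  }
}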
Example #3
/**
 * Calculate covariances for both points and poses up to the given time step.
 */
void covariances(unsigned int step, list<MatrixXd>& point_marginals,
    list<MatrixXd>& pose_marginals) {
  // make sure return arguments are empty
  point_marginals.clear();
  pose_marginals.clear();

  // combining everything into one call is faster,
  // as it avoids recalculating commonly needed entries
  Covariances::node_lists_t node_lists;
  for (unsigned int i = 0; i < step; i++) {
    list<Node*> entry;
    entry.push_back(loader->pose_nodes()[i]);
    node_lists.push_back(entry);
  }
  for (unsigned int i = 0; i < loader->num_points(step); i++) {
    list<Node*> entry;
    entry.push_back(loader->point_nodes()[i]);
    node_lists.push_back(entry);
  }
  pose_marginals = slam.covariances().marginal(node_lists);

  // split into points and poses
  if (pose_marginals.size() > 0) {
    // advance an iterator past the first 'step' entries, which are the pose
    // marginals; everything after that belongs to the point nodes
    list<MatrixXd>::iterator center = pose_marginals.begin();
    advance(center, step);
    point_marginals.splice(point_marginals.begin(), pose_marginals, center,
        pose_marginals.end());
  }
}
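A small, hypothetical consumer of the two lists filled in above (assuming the same includes and usings as the surrounding code): it prints the trace of every covariance block, a quick scalar summary of each node's uncertainty.

// Hypothetical helper: summarize each marginal covariance block by its trace.
void print_marginal_traces(const list<MatrixXd>& pose_marginals,
                           const list<MatrixXd>& point_marginals) {
  int i = 0;
  for (list<MatrixXd>::const_iterator it = pose_marginals.begin();
       it != pose_marginals.end(); it++, i++) {
    cout << "pose " << i << " covariance trace: " << it->trace() << endl;
  }
  i = 0;
  for (list<MatrixXd>::const_iterator it = point_marginals.begin();
       it != point_marginals.end(); it++, i++) {
    cout << "point " << i << " covariance trace: " << it->trace() << endl;
  }
}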
Example #4
void print_all(Slam& slam) {
  list<Node*> ids = slam.get_nodes();
  for (list<Node*>::iterator iter = ids.begin(); iter != ids.end(); iter++) {
    // dynamic_cast returns NULL for nodes that are not poses (e.g. landmarks)
    Pose2d_Node* pose = dynamic_cast<Pose2d_Node*>(*iter);
    if (pose != NULL) {
      cout << pose->value() << endl;
    }
  }
}
Example #5
/**
 * The actual processing of data, in separate thread if GUI enabled.
 */
int process(void* unused) {

  // incrementally process data
  slam.set_properties(prop);
  incremental_slam();

  toc("all");

  if (!prop.quiet) {
    if (!batch_processing) {
      cout << endl;
    }
    double accumulated = tictoc("setup") + tictoc("incremental")
        + tictoc("batch");
    cout << "Accumulated computation time: " << accumulated << "s" << endl;
    cout << "(Overall execution time: " << tictoc("all") << "s)" << endl;
    slam.print_stats();
    cout << endl;
  }

  if (save_stats) {
    cout << "Saving statistics to " << fname_stats << endl;
    save_statistics(fname_stats);
    cout << endl;
  }
  if (write_result) {
    cout << "Saving result to " << fname_result << endl;
    slam.save(fname_result);
    cout << endl;
  }

#ifdef USE_GUI
  if (use_gui) {
    while (true) {
      if (viewer.exit_requested()) {
        exit(0);
      }
      SDL_Delay(100);
    }
  }
#endif

  exit(0);
}
Example #6
/**
 * Incrementally process factors.
 */
void incremental_slam() {
  unsigned int step = 0;


  unsigned int next_step = step;
  // step by step after reading log file
  for (; loader->more_data(&next_step); step = next_step) {
    check_quit();

    double t0 = tic();

    tic("setup");

    // add new variables and constraints for current step
    for (unsigned int s = step; s < next_step; s++) {
      for (list<Node*>::const_iterator it = loader->nodes(s).begin();
          it != loader->nodes(s).end(); it++) {
        if (prop.verbose)
          cout << **it << endl;
        slam.add_node(*it);
      }
      for (list<Factor*>::const_iterator it = loader->factors(s).begin();
          it != loader->factors(s).end(); it++) {
        if (prop.verbose)
          cout << **it << endl;
        slam.add_factor(*it);
      }
    }

    toc("setup");
    tic("incremental");

    if (!(batch_processing || no_optimization)) {
      slam.update();
    }

    toc("incremental");

    if (save_stats) {
      stats.resize(step + 1);
      stats[step].time = toc(t0);
      stats[step].chi2 = slam.normalized_chi2();
      stats[step].nnz = slam.get_R().nnz();
      stats[step].local_chi2 = slam.local_chi2(100); // last 100 constraints
      stats[step].nnodes = slam.get_nodes().size();
      stats[step].nconstraints = slam.get_factors().size();
    }

    // visualization is not counted in timing
    if (!(batch_processing || no_optimization)) {
      visualize(step);
    }
  }

  visualize(step - 1);

  if (!no_optimization) {
    if (batch_processing) {
      tic("batch");
      slam.batch_optimization();
      toc("batch");
    } else {
      // end with a batch step/relinearization
      prop.mod_batch = 1;
      slam.set_properties(prop);
      tic("final");
      slam.update();
      toc("final");
    }
  }

  visualize(step - 1);
}
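The stats vector written to in the loop above is defined elsewhere in the program; the fields used suggest a per-step record along these lines (a hypothetical sketch, not the original definition, assuming <vector> is included):

// Hypothetical per-step statistics record matching the fields used above.
struct StepStats {
  double time;         // wall-clock time spent on this step
  double chi2;         // normalized chi-square after the step
  int nnz;             // number of non-zeros in the R factor
  double local_chi2;   // chi-square over the last 100 constraints
  int nnodes;          // number of nodes in the graph
  int nconstraints;    // number of factors in the graph
};
std::vector<StepStats> stats; // indexed by step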
Example #7
/**
 * Command line argument processing.
 */
void process_arguments(int argc, char* argv[]) {
  int c;
  while ((c = getopt(argc, argv, ":h?vqn:GLS:W:FCBMPNRd:u:b:s:")) != -1) {
    // Each option character has to be in the string in getopt();
    // the first colon changes the error character from '?' to ':';
    // a colon after an option means that there is an extra
    // parameter to this option; 'W' is a reserved character
    switch (c) {
    case 'h':
    case '?':
      cout << intro;
      cout << usage;
      exit(0);
      break;
    case 'v':
      prop.quiet = false;
      prop.verbose = true;
      break;
    case 'q':
      prop.quiet = true;
      prop.verbose = false;
      break;
    case 'n':
      parse_num_lines = atoi(optarg);
      require(parse_num_lines>0, "Number of lines (-n) must be positive (>0).");
      break;
    case 'G':
#ifndef USE_GUI
      require(false, "GUI support (-G) was disabled at compile time");
#endif
      use_gui = true;
      break;
    case 'L':
#ifndef USE_LCM
      require(false, "LCM support (-L) was disabled at compile time");
#endif
      use_lcm = true;
      break;
    case 'S':
      save_stats = true;
      if (optarg != NULL) {
        strncpy(fname_stats, optarg, FNAME_MAX);
      }
      break;
    case 'W':
      write_result = true;
      if (optarg != NULL) {
        strncpy(fname_result, optarg, FNAME_MAX);
      }
      break;
    case 'F':
      prop.force_numerical_jacobian = true;
      break;
    case 'C':
      calculate_covariances = true;
      break;
    case 'B':
      batch_processing = true;
      break;
    case 'M':
      prop.method = LEVENBERG_MARQUARDT;
      break;
    case 'P':
      prop.method = DOG_LEG;
      break;
    case 'N':
      no_optimization = true;
      break;
    case 'R':
      slam.set_cost_function(&robust_cost_function);
      break;
    case 'd':
      mod_draw = atoi(optarg);
      require(mod_draw>0,
          "Number of steps between drawing (-d) must be positive (>0).");
      break;
    case 'u':
      prop.mod_update = atoi(optarg);
      require(prop.mod_update>0,
          "Number of steps between updates (-u) must be positive (>0).");
      break;
    case 'b':
      prop.mod_batch = atoi(optarg);
      require(
          prop.mod_batch>=0,
          "Number of steps between batch steps (-b) must be positive or zero (>=0).");
      break;
    case 's':
      prop.mod_solve = atoi(optarg);
      require(prop.mod_solve>0,
          "Number of steps between solving (-s) must be positive (>0).");
      break;
    case ':': // missing option argument (getopt returns ':' because the option string starts with ':')
      cout << intro;
      cout << usage;
      exit(1);
      break;
    }
  }

  if ((prop.method == LEVENBERG_MARQUARDT) && (!batch_processing)) {
    cout << "Error:  Levenberg-Marquardt optimization has no incremental mode."
        << endl;
    exit(1);
  }

  if (argc > optind + 1) {
    cout << intro;
    cout << endl;
    cout << "Error: Too many arguments." << endl;
    cout << usage;
    exit(1);
  } else if (argc == optind + 1) {
    strncpy(fname, argv[optind], FNAME_MAX);
  }

}
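The require() calls throughout this function rely on a small check-and-abort helper defined elsewhere in the program; a hypothetical stand-in could look like this:

#include <cstdlib>
#include <iostream>
#include <string>

// Hypothetical stand-in for the require() helper used above: print the
// message and abort when a command line argument fails its sanity check.
void require(bool condition, const std::string& message) {
  if (!condition) {
    std::cout << message << std::endl;
    std::exit(1);
  }
}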
Example #8
#include <isam/isam.h>

using namespace std;
using namespace isam;

int main() {
  // Slam is defined locally, as the nodes and factors below are allocated on
  // the stack and therefore go out of scope at the end of this function; note
  // that you can (and typically have to) allocate dynamically, see demo.cpp,
  // but local variables keep this example simpler.
  Slam slam;

  Noise noise = SqrtInformation(10. * eye(3));

  // first pose graph
  Pose2d prior_origin(0., 0., 0.);
  Pose2d_Node a0;
  slam.add_node(&a0);
  Pose2d_Factor p_a0(&a0, prior_origin, noise);
  slam.add_factor(&p_a0);
  Pose2d odo(1., 0., 0.);
  Pose2d_Node a1;
  slam.add_node(&a1);
  Pose2d_Pose2d_Factor o_a01(&a0, &a1, odo, noise);
  slam.add_factor(&o_a01);

  // second pose graph
  Pose2d_Node b0;
  slam.add_node(&b0);
  Pose2d_Factor p_b0(&b0, prior_origin, noise);
  slam.add_factor(&p_b0);
  Pose2d_Node b1;
  slam.add_node(&b1);
  Pose2d_Pose2d_Factor o_b01(&b0, &b1, odo, noise);
  slam.add_factor(&o_b01);

  cout << "two independent pose graphs:" << endl;
  cout << "batch optimization... (nothing changes)" << endl;
  slam.batch_optimization();
  cout << "...done" << endl;
  cout << "a0: " << a0.value() << endl;
  cout << "a1: " << a1.value() << endl;
  cout << "b0: " << b0.value() << endl;
  cout << "b1: " << b1.value() << endl;

  // constraint between two pose graphs:

  // the first pose graph now also has an anchor node; an arbitrary prior
  // (here the origin) would also initialize the anchor node
  Anchor2d_Node anchor0(&slam);
  slam.add_node(&anchor0);
  //Pose2d_Factor p_anchor0(&anchor0, prior_origin, noise);
  //slam.add_factor(&p_anchor0);

  // anchor node for second trajectory (as before)
  Anchor2d_Node anchor1(&slam);
  slam.add_node(&anchor1);

  Pose2d measure0(0., 1., 0.);
  // now we need 2 optional parameters to refer to both anchor nodes -
  // the order determines which trajectory the measurement originates
  // from; also, the first one already needs to be initialized (done
  // using prior above), the second one gets initialized if needed
  Pose2d_Pose2d_Factor d_a1_b1(&a1, &b1, measure0, noise, &anchor0, &anchor1);
  slam.add_factor(&d_a1_b1);

  cout << "\nfirst constraint added:" << endl;

  cout << "batch optimization... (nothing changes)" << endl;
  slam.batch_optimization();
  cout << "...done" << endl;

  cout << "a0: " << a0.value() << endl;
  cout << "a1: " << a1.value() << endl;
  cout << "b0: " << b0.value() << endl;
  cout << "b1: " << b1.value() << endl;
  cout << "anchor0: " << anchor0.value() << endl;
  cout << "anchor1: " << anchor1.value() << endl;

  Pose2d measure1(0., 0.5, 0.); // conflicting measurement, want least squares
  Pose2d_Pose2d_Factor d_a1_b1_2(&a1, &b1, measure1, noise, &anchor0, &anchor1);
  slam.add_factor(&d_a1_b1_2);

  cout << "\nsecond conflicting constraint added (least squares is 0.75):" << endl;

  cout << "batch optimization... (least squares solution)" << endl;
  slam.batch_optimization();
  cout << "...done" << endl;

  cout.precision(2);
  cout << fixed;
  cout << "a0: " << a0.value() << endl;
  cout << "a1: " << a1.value() << endl;
  cout << "b0: " << b0.value() << endl;
  cout << "b1: " << b1.value() << endl;
  cout << "anchor0: " << anchor0.value() << endl;
  cout << "anchor1: " << anchor1.value() << endl;

  slam.save("anchorNodes.graph");

  return 0;
}
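Both constraints between a1 and b1 use the same noise model, so the least-squares estimate of their relative y-offset is just the average of the two measurements, (1.0 + 0.5) / 2 = 0.75, which is why the comments above predict 0.75. A tiny standalone check in plain C++ (no iSAM types involved):

#include <iostream>

int main() {
  double y1 = 1.0, y2 = 0.5; // the two conflicting y measurements
  double w1 = 1.0, w2 = 1.0; // equal weights: both factors share one noise model
  double y_ls = (w1 * y1 + w2 * y2) / (w1 + w2);
  std::cout << "least-squares y-offset: " << y_ls << std::endl; // prints 0.75
  return 0;
}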
Example #9
#include <isam/isam.h>

using namespace std;
using namespace isam;
using namespace Eigen;

int main(int argc, const char* argv[]) {
  Slam slam; // the excerpt uses a Slam instance without showing its declaration; assumed here
  Pose2d origin;
  Noise noise = SqrtInformation(10. * eye(3));
  Pose2d_Node* pose_node_1 = new Pose2d_Node(); // create node
  slam.add_node(pose_node_1); // add it to the Slam graph
  Pose2d_Factor* prior = new Pose2d_Factor(pose_node_1, origin, noise); // create prior measurement, a factor
  slam.add_factor(prior); // add it to the Slam graph

  Pose2d_Node* pose_node_2 = new Pose2d_Node(); // create node
  slam.add_node(pose_node_2); // add it to the Slam graph

  Pose2d delta(1., 0., 0.);
  Pose2d_Pose2d_Factor* odo = new Pose2d_Pose2d_Factor(pose_node_1, pose_node_2, delta, noise);
  slam.add_factor(odo);

  slam.batch_optimization();

#if 0
  const Covariances& covariances = slam.covariances();
#else
  // operate on a copy (just an example, cloning is useful for running
  // covariance recovery in a separate thread)
  const Covariances& covariances = slam.covariances().clone();
#endif

  // recovering the full covariance matrix
  cout << "Full covariance matrix:" << endl;
  MatrixXd cov_full = covariances.marginal(slam.get_nodes());
  cout << cov_full << endl << endl;

  // sanity checking by inverting the information matrix, not using R
  SparseSystem Js = slam.jacobian();
  MatrixXd J(Js.num_rows(), Js.num_cols());
  for (int r=0; r<Js.num_rows(); r++) {
    for (int c=0; c<Js.num_cols(); c++) {
      J(r,c) = Js(r,c);
    }
  }
  MatrixXd H = J.transpose() * J;
  MatrixXd cov_full2 = H.inverse();
  cout << cov_full2 << endl;

  // recovering the block-diagonals only of the full covariance matrix
  cout << "Block-diagonals only:" << endl;
  Covariances::node_lists_t node_lists;
  list<Node*> nodes;
  nodes.push_back(pose_node_1);
  node_lists.push_back(nodes);
  nodes.clear();
  nodes.push_back(pose_node_2);
  node_lists.push_back(nodes);
  list<MatrixXd> cov_blocks = covariances.marginal(node_lists);
  int i = 1;
  for (list<MatrixXd>::iterator it = cov_blocks.begin(); it!=cov_blocks.end(); it++, i++) {
    cout << "block " << i << endl;
    cout << *it << endl;
  }

  // recovering individual entries, here the right block column
  cout << "Right block column:" << endl;
  Covariances::node_pair_list_t node_pair_list;
  node_pair_list.push_back(make_pair(pose_node_1, pose_node_2));
  node_pair_list.push_back(make_pair(pose_node_2, pose_node_2));
  list<MatrixXd> cov_entries = covariances.access(node_pair_list);
  for (list<MatrixXd>::iterator it = cov_entries.begin(); it!=cov_entries.end(); it++) {
    cout << *it << endl;
  }
  return 0;
}
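A possible follow-up to the sanity check above (hypothetical, assuming the same includes and usings): compare the covariance recovered through the Covariances interface with the one obtained by inverting J^T J numerically instead of by eye.

// Hypothetical helper: largest absolute entry-wise difference between the two
// covariance estimates computed in the example above.
double max_covariance_difference(const MatrixXd& cov_full,
                                 const MatrixXd& cov_full2) {
  return (cov_full - cov_full2).cwiseAbs().maxCoeff();
}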
Example #10
#include <isam/isam.h>
#include <isam/glc.h> // GLC node removal: glc_remove_node, glc_elim_factors, GLC_RootShift
#include <fstream>

using namespace std;
using namespace isam;
using namespace Eigen;

// Note: add_noise() is a helper defined elsewhere in the original example; it
// presumably perturbs a measurement with noise of the given standard deviation.

int main() {
  // setup a simple graph
  // known poses
  Pose2d x0 (0,  0,   M_PI/9.0);
  Pose2d x1 (10, 10,  0.0     );
  Pose2d x2 (20, 20,  M_PI/6.0);
  Pose2d x3 (30, 30, -M_PI/4.0);
  Pose2d x4 (40, 40, -M_PI/8.0);
  // known landmarks
  Point2d la (100, 100);
  
  // measurements
  Pose2d z0 = x0;
  Pose2d z01 = x1.ominus(x0);
  Pose2d z12 = x2.ominus(x1);
  Pose2d z23 = x3.ominus(x2);
  Pose2d z34 = x4.ominus(x3);
  Pose2d z13 = x3.ominus(x1);
  // landmark measurement
  Point2d z1a = x1.transform_to(la);
    
  // add noise to measurements
  double sigma = 0.01;
  MatrixXd Q = sigma*sigma*eye(3);
  Noise Qsqinf = Information(Q.inverse());
  MatrixXd Q2 = sigma*sigma*eye(2);
  Noise Q2sqinf = Information(Q2.inverse());
  z0  = add_noise(z0,  sigma);
  z01 = add_noise(z01, sigma);
  z12 = add_noise(z12, sigma);
  z23 = add_noise(z23, sigma);
  z34 = add_noise(z34, sigma);
  z13 = add_noise(z13, sigma);
  z1a = add_noise(z1a, sigma);
  
  // nodes
  Pose2d_Node*  n0 = new Pose2d_Node();
  Pose2d_Node*  n1 = new Pose2d_Node();
  Pose2d_Node*  n2 = new Pose2d_Node();
  Pose2d_Node*  n3 = new Pose2d_Node();
  Pose2d_Node*  n4 = new Pose2d_Node();
  Point2d_Node* na = new Point2d_Node();
  
  // make graph
  Slam slam;
  Properties prop;
  prop.verbose = true; prop.quiet = false; prop.method = GAUSS_NEWTON;
  slam.set_properties(prop);
  
  // add nodes to graph
  slam.add_node(n0);
  slam.add_node(n1);
  slam.add_node(n2);
  slam.add_node(n3);
  slam.add_node(n4);
  slam.add_node(na);
    
  // create factors and add them to the graph
  Pose2d_Factor* z0f = new Pose2d_Factor(n0, z0, Qsqinf);
  slam.add_factor(z0f);
  Pose2d_Pose2d_Factor* z01f = new Pose2d_Pose2d_Factor(n0, n1, z01, Qsqinf);
  slam.add_factor(z01f);
  Pose2d_Pose2d_Factor* z12f = new Pose2d_Pose2d_Factor(n1, n2, z12, Qsqinf);
  slam.add_factor(z12f);
  Pose2d_Pose2d_Factor* z23f = new Pose2d_Pose2d_Factor(n2, n3, z23, Qsqinf);
  slam.add_factor(z23f);
  Pose2d_Pose2d_Factor* z34f = new Pose2d_Pose2d_Factor(n3, n4, z34, Qsqinf);
  slam.add_factor(z34f);
  Pose2d_Pose2d_Factor* z13f = new Pose2d_Pose2d_Factor(n1, n3, z13, Qsqinf);
  slam.add_factor(z13f);
  Pose2d_Point2d_Factor* z1af = new Pose2d_Point2d_Factor(n1, na, z1a, Q2sqinf);
  slam.add_factor(z1af);
  
  slam.batch_optimization();
  slam.print_stats();
  ofstream f; f.open ("before.graph"); slam.write(f); f.close();
  //slam.print_graph();
  //print_all(slam);
  
  // get true marginal distribution over p(x0, x2, x3, x4)
  list<Node*> nodes_subset;
  nodes_subset.push_back(n0);
  nodes_subset.push_back(n2);
  nodes_subset.push_back(n3);
  nodes_subset.push_back(n4);
  MatrixXd P_true = slam.covariances().marginal(nodes_subset);
  //MatrixXd L_true = get_info (&slam);
  VectorXd mu_true(12);
  mu_true.segment<3>(0) = n0->value().vector();
  mu_true.segment<3>(3) = n2->value().vector();
  mu_true.segment<3>(6) = n3->value().vector();
  mu_true.segment<3>(9) = n4->value().vector();  
  
  // remove node 1 using GLC ========================================
  bool sparse = true;  // sparse approximate or dense exact
  vector<Factor*> felim = glc_elim_factors (n1); // useful for local management of factors
  //vector<Factor*> fnew = glc_remove_node (slam, n1, sparse); // not root shifted
  GLC_RootShift rs;
  vector<Factor*> fnew = glc_remove_node (slam, n1, sparse, &rs); // root shifted
  // ================================================================
  cout << felim.size() << " factor(s) removed." << endl;
  cout << fnew.size() << " new GLC factor(s) added." << endl;
  slam.batch_optimization();
  slam.print_stats();
  f.open ("after.graph"); slam.write(f); f.close();

  // get glc marginal distribution over p(x0, x2, x3, x4)
  MatrixXd P_glc = slam.covariances().marginal(nodes_subset);
  //MatrixXd L_glc = get_info (&slam);
  VectorXd mu_glc(12);
  mu_glc.segment<3>(0) = n0->value().vector();
  mu_glc.segment<3>(3) = n2->value().vector();
  mu_glc.segment<3>(6) = n3->value().vector();
  mu_glc.segment<3>(9) = n4->value().vector();  

  // calculate the KLD
  int n = mu_glc.size();
  double A = (P_glc.inverse() * P_true).trace();
  VectorXd du = mu_glc - mu_true;
  // deal with difference in angles
  for(int i=0; i<du.size(); i++) {
    if(i % 3 == 2)
      du(i) = standardRad(du(i));
  }   
  double B = du.transpose() * P_glc.inverse() * du;
  double C = log(P_true.determinant()) - log(P_glc.determinant());
  double kld = 0.5*(A + B - C - n);
  cout << "KLD: " << kld << endl;
  
  return 0;
}
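For reference, the number printed at the end is the closed-form KL divergence between the true Gaussian marginal N(\mu_{true}, P_{true}) and its GLC approximation N(\mu_{glc}, P_{glc}), with du = \mu_{glc} - \mu_{true} (angles wrapped) and n = 12:

D_{KL} = \tfrac{1}{2} \left( \operatorname{tr}(P_{glc}^{-1} P_{true}) + du^{\top} P_{glc}^{-1} du - n + \ln\det P_{glc} - \ln\det P_{true} \right)

In the code, A is the trace term, B the quadratic term, and C = \ln\det P_{true} - \ln\det P_{glc}, so kld = 0.5*(A + B - C - n) evaluates exactly this expression.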