Example No. 1
bool MainWindow::prepare()
{
  SparseOptimizer* optimizer = viewer->graph;
  if (_currentOptimizationAlgorithmProperty.requiresMarginalize) {
    cerr << "Marginalizing Landmarks" << endl;
    for (SparseOptimizer::VertexIDMap::const_iterator it = optimizer->vertices().begin(); it != optimizer->vertices().end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
      int vdim = v->dimension();
      v->setMarginalized((vdim == _currentOptimizationAlgorithmProperty.landmarkDim));
    }
  }
  else {
    cerr << "Preparing (no marginalization of Landmarks)" << endl;
    for (SparseOptimizer::VertexIDMap::const_iterator it = optimizer->vertices().begin(); it != optimizer->vertices().end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
      v->setMarginalized(false);
    }
  }
  viewer->graph->initializeOptimization();
  return true;
}
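The requiresMarginalize and landmarkDim fields queried above belong to g2o's OptimizationAlgorithmProperty, which is filled in when a solver is constructed. A minimal sketch of where such a property typically comes from, assuming only the public OptimizationAlgorithmFactory API (the solver tag "lm_var" is purely illustrative):

#include "g2o/core/sparse_optimizer.h"
#include "g2o/core/optimization_algorithm_factory.h"

// Hedged sketch: let the factory construct a solver by name and describe it.
void allocateSolver(g2o::SparseOptimizer& optimizer,
                    g2o::OptimizationAlgorithmProperty& property)
{
  g2o::OptimizationAlgorithm* algorithm =
      g2o::OptimizationAlgorithmFactory::instance()->construct("lm_var", property);
  optimizer.setAlgorithm(algorithm);
  // property.requiresMarginalize and property.landmarkDim now describe the
  // chosen solver and can drive the loop in prepare() above.
}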
Example No. 2
int main()
{
  double euc_noise = 0.01;       // noise in position, m
  //  double outlier_ratio = 0.1;


  SparseOptimizer optimizer;
  optimizer.setVerbose(false);

  // variable-size block solver
  BlockSolverX::LinearSolverType * linearSolver = new LinearSolverDense<g2o::BlockSolverX::PoseMatrixType>();
  BlockSolverX * solver_ptr = new BlockSolverX(linearSolver);
  g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);

  optimizer.setAlgorithm(solver);

  vector<Vector3d> true_points;
  for (size_t i=0;i<1000; ++i)
  {
    true_points.push_back(Vector3d((Sample::uniform()-0.5)*3,
                                   Sample::uniform()-0.5,
                                   Sample::uniform()+10));
  }


  // set up two poses
  int vertex_id = 0;
  for (size_t i=0; i<2; ++i)
  {
    // set up rotation and translation for this node
    Vector3d t(0,0,i);
    Quaterniond q;
    q.setIdentity();

    Eigen::Isometry3d cam; // camera pose
    cam = q;
    cam.translation() = t;

    // set up node
    VertexSE3 *vc = new VertexSE3();
    vc->setEstimate(cam);

    vc->setId(vertex_id);      // vertex id

    cerr << t.transpose() << " | " << q.coeffs().transpose() << endl;

    // set first cam pose fixed
    if (i==0)
      vc->setFixed(true);

    // add to optimizer
    optimizer.addVertex(vc);

    vertex_id++;                
  }

  // set up point matches
  for (size_t i=0; i<true_points.size(); ++i)
  {
    // get two poses
    VertexSE3* vp0 = 
      dynamic_cast<VertexSE3*>(optimizer.vertices().find(0)->second);
    VertexSE3* vp1 = 
      dynamic_cast<VertexSE3*>(optimizer.vertices().find(1)->second);

    // calculate the relative 3D position of the point
    Vector3d pt0,pt1;
    pt0 = vp0->estimate().inverse() * true_points[i];
    pt1 = vp1->estimate().inverse() * true_points[i];

    // add in noise
    pt0 += Vector3d(Sample::gaussian(euc_noise ),
                    Sample::gaussian(euc_noise ),
                    Sample::gaussian(euc_noise ));

    pt1 += Vector3d(Sample::gaussian(euc_noise ),
                    Sample::gaussian(euc_noise ),
                    Sample::gaussian(euc_noise ));

    // form edge, with normals in various positions
    Vector3d nm0, nm1;
    nm0 << 0, i, 1;
    nm1 << 0, i, 1;
    nm0.normalize();
    nm1.normalize();

    Edge_V_V_GICP * e           // new edge with correct cohort for caching
        = new Edge_V_V_GICP(); 

    e->setVertex(0, vp0);      // first viewpoint

    e->setVertex(1, vp1);      // second viewpoint

    EdgeGICP meas;
    meas.pos0 = pt0;
    meas.pos1 = pt1;
    meas.normal0 = nm0;
    meas.normal1 = nm1;

    e->setMeasurement(meas);
    //        e->inverseMeasurement().pos() = -kp;
    
    meas = e->measurement();
    // use this for point-plane
    e->information() = meas.prec0(0.01);

    // use this for point-point 
    //    e->information().setIdentity();

    //    e->setRobustKernel(true);
    //e->setHuberWidth(0.01);

    optimizer.addEdge(e);
  }

  // move second cam off of its true position
  VertexSE3* vc = 
    dynamic_cast<VertexSE3*>(optimizer.vertices().find(1)->second);
  Eigen::Isometry3d cam = vc->estimate();
  cam.translation() = Vector3d(0,0,0.2);
  vc->setEstimate(cam);

  optimizer.initializeOptimization();
  optimizer.computeActiveErrors();
  cout << "Initial chi2 = " << FIXED(optimizer.chi2()) << endl;

  optimizer.setVerbose(true);

  optimizer.optimize(5);

  cout << endl << "Second vertex should be near 0,0,1" << endl;
  cout <<  dynamic_cast<VertexSE3*>(optimizer.vertices().find(0)->second)
    ->estimate().translation().transpose() << endl;
  cout <<  dynamic_cast<VertexSE3*>(optimizer.vertices().find(1)->second)
    ->estimate().translation().transpose() << endl;
}
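This example calls Sample::uniform() and Sample::gaussian(), a small helper that the g2o GICP demo defines elsewhere. A self-contained stand-in, assuming uniform() returns a value in [0, 1) and gaussian(sigma) a zero-mean normal sample with standard deviation sigma (the real demo may seed and draw differently):

#include <random>

class Sample
{
 public:
  static double uniform()
  {
    static std::mt19937 gen(42);
    static std::uniform_real_distribution<double> dist(0.0, 1.0);
    return dist(gen);
  }
  static double gaussian(double sigma)
  {
    static std::mt19937 gen(42);
    static std::normal_distribution<double> dist(0.0, 1.0);
    return sigma * dist(gen);
  }
};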
Example No. 3
int main(int argc, char **argv) {
  /************************************************************************
   *                          Input handling                              *
   ************************************************************************/
  float rows, cols, gain, square_size;
  float resolution, max_range, usable_range, angle, threshold;
  string g2oFilename, mapFilename;
  g2o::CommandArgs arg;
  arg.param("resolution", resolution, 0.05f, "resolution of the map (how much is in meters a pixel)");
  arg.param("threshold", threshold, -1.0f, "threshold to apply to the frequency map (values under the threshold are discarded)");
  arg.param("rows", rows, 0, "impose the resulting map to have this number of rows");
  arg.param("cols", cols, 0, "impose the resulting map to have this number of columns");
  arg.param("max_range", max_range, -1.0f, "max laser range to consider for map building");
  arg.param("usable_range", usable_range, -1.0f, "usable laser range for map building");
  arg.param("gain", gain, 1, "gain to impose to the pixels of the map");
  arg.param("square_size", square_size, 1, "square size of the region where increment the hits");
  arg.param("angle", angle, 0, "rotate the map of x degrees");
  arg.paramLeftOver("input_graph.g2o", g2oFilename, "", "input g2o graph to use to build the map", false);
  arg.paramLeftOver("output_map", mapFilename, "", "output filename where to save the map (without extension)", false);  
  arg.parseArgs(argc, argv);

  angle = angle*M_PI/180.0;

  /************************************************************************
   *                          Loading Graph                               *
   ************************************************************************/
  // Load graph
  typedef BlockSolver< BlockSolverTraits<-1, -1> >  SlamBlockSolver;
  typedef LinearSolverCSparse<SlamBlockSolver::PoseMatrixType> SlamLinearSolver;
  SlamLinearSolver *linearSolver = new SlamLinearSolver();
  linearSolver->setBlockOrdering(false);
  SlamBlockSolver *blockSolver = new SlamBlockSolver(linearSolver);
  OptimizationAlgorithmGaussNewton *solverGauss = new OptimizationAlgorithmGaussNewton(blockSolver);
  SparseOptimizer *graph = new SparseOptimizer();
  graph->setAlgorithm(solverGauss);    
  graph->load(g2oFilename.c_str());
  
  // Sort vertices
  vector<int> vertexIds(graph->vertices().size());
  int k = 0;
  for(OptimizableGraph::VertexIDMap::iterator it = graph->vertices().begin(); it != graph->vertices().end(); ++it) {
    vertexIds[k++] = (it->first);
  }  
  sort(vertexIds.begin(), vertexIds.end());
  
  /************************************************************************
   *                          Compute map size                            *
   ************************************************************************/
  // Check the entire graph to find map bounding box
  Eigen::Matrix2d boundingBox = Eigen::Matrix2d::Zero();
  std::vector<RobotLaser*> robotLasers;
  std::vector<SE2> robotPoses;
  double xmin = std::numeric_limits<double>::max();
  double xmax = -std::numeric_limits<double>::max(); // most negative double; min() would be the smallest positive value
  double ymin = std::numeric_limits<double>::max();
  double ymax = -std::numeric_limits<double>::max();

  SE2 baseTransform(0,0,angle);

  for(size_t i = 0; i < vertexIds.size(); ++i) {
    OptimizableGraph::Vertex *_v = graph->vertex(vertexIds[i]);
    VertexSE2 *v = dynamic_cast<VertexSE2*>(_v);
    if(!v) { continue; }
    v->setEstimate(baseTransform*v->estimate());
    OptimizableGraph::Data *d = v->userData();

    while(d) {
      RobotLaser *robotLaser = dynamic_cast<RobotLaser*>(d);
      if(!robotLaser) {
	d = d->next();
	continue;
      }      
      robotLasers.push_back(robotLaser);
      robotPoses.push_back(v->estimate());
      double x = v->estimate().translation().x();
      double y = v->estimate().translation().y();
      
      xmax = xmax > x+usable_range ? xmax : x+usable_range;
      ymax = ymax > y+usable_range ? ymax : y+usable_range;
      xmin = xmin < x-usable_range ? xmin : x-usable_range;
      ymin = ymin < y-usable_range ? ymin : y-usable_range;
 
      d = d->next();
    }
  }

  boundingBox(0,0)=xmin;
  boundingBox(0,1)=xmax;
  boundingBox(1,0)=ymin;
  boundingBox(1,1)=ymax;

  std::cout << "Found " << robotLasers.size() << " laser scans"<< std::endl;
  std::cout << "Bounding box: " << std::endl << boundingBox << std::endl; 
  if(robotLasers.size() == 0)  {
    std::cout << "No laser scans found ... quitting!" << std::endl;
    return 0;
  }

  /************************************************************************
   *                          Compute the map                             *
   ************************************************************************/
  // Create the map
  Eigen::Vector2i size;
  if(rows != 0 && cols != 0) { size = Eigen::Vector2i(rows, cols); }
  else {
    size = Eigen::Vector2i((boundingBox(0, 1) - boundingBox(0, 0))/ resolution,
			   (boundingBox(1, 1) - boundingBox(1, 0))/ resolution);
    } 
  std::cout << "Map size: " << size.transpose() << std::endl;
  if(size.x() == 0 || size.y() == 0) {
    std::cout << "Zero map size ... quitting!" << std::endl;
    return 0;
  }

  

  //Eigen::Vector2f offset(-size.x() * resolution / 2.0f, -size.y() * resolution / 2.0f);
  Eigen::Vector2f offset(boundingBox(0, 0),boundingBox(1, 0));
  FrequencyMapCell unknownCell;
  FrequencyMap map = FrequencyMap(resolution, offset, size, unknownCell);

  for(size_t i = 0; i < vertexIds.size(); ++i) {
    OptimizableGraph::Vertex *_v = graph->vertex(vertexIds[i]);
    VertexSE2 *v = dynamic_cast<VertexSE2*>(_v);
    if(!v) { continue; }
    OptimizableGraph::Data *d = v->userData();
    SE2 robotPose = v->estimate();
    
    while(d) {
      RobotLaser *robotLaser = dynamic_cast<RobotLaser*>(d);
      if(!robotLaser) {
	d = d->next();
	continue;
      }      
      map.integrateScan(robotLaser, robotPose, max_range, usable_range, gain, square_size);
      d = d->next();
    }
  }

  /************************************************************************
   *                          Save map image                              *
   ************************************************************************/
  cv::Mat mapImage(map.rows(), map.cols(), CV_8UC1);
  mapImage.setTo(cv::Scalar(0));
  for(int c = 0; c < map.cols(); c++) {
    for(int r = 0; r < map.rows(); r++) {
      if(map(r, c).misses() == 0 && map(r, c).hits() == 0) {
	mapImage.at<unsigned char>(r, c) = 127;
      } else {
	float fraction = (float)map(r, c).hits()/(float)(map(r, c).hits()+map(r, c).misses());
	
	if (threshold > 0 && fraction > threshold)
	  mapImage.at<unsigned char>(r, c) = 0;
	else if (threshold > 0 && fraction <= threshold)
	  mapImage.at<unsigned char>(r, c) = 255;
	else {
	  float val = 255*(1-fraction);
	  mapImage.at<unsigned char>(r, c) = (unsigned char)val;
	}

      }
      // else if(map(r, c).hits() > threshold) {
      // 	mapImage.at<unsigned char>(r, c) = 255;
      // }
      // else {
      // 	mapImage.at<unsigned char>(r, c) = 0;
      // }
    }
  }
  cv::imwrite(mapFilename + ".png", mapImage);

  /************************************************************************
   *                          Write yaml file                             *
   ************************************************************************/
  std::ofstream ofs(string(mapFilename + ".yaml").c_str());
  Eigen::Vector3f origin(0.0f, 0.0f, 0.0f);
  ofs << "image: " << mapFilename << ".png" << std::endl
      << "resolution: " << resolution << std::endl
      << "origin: [" << origin.x() << ", " << origin.y() << ", " << origin.z() << "]" << std::endl
      << "negate: 0" << std::endl
      << "occupied_thresh: " << 0.65f << std::endl
      << "free_thresh: " << 0.2f << std::endl;
  return 0;
}
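The FrequencyMap is built with the bounding-box minimum as its offset, so a grid cell maps back to world coordinates as offset plus index times resolution. A minimal sketch of that conversion, assuming rows index the x axis and columns the y axis, as in the map-size computation above:

#include <Eigen/Core>

// Hedged helper: world coordinates of the lower corner of cell (r, c),
// given the resolution and offset used to build the map.
Eigen::Vector2f cellToWorld(int r, int c, float resolution,
                            const Eigen::Vector2f& offset)
{
  return offset + Eigen::Vector2f(r * resolution, c * resolution);
}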
Example No. 4
int main(int argc, char** argv)
{
  OptimizableGraph::initMultiThreading();
  int maxIterations;
  bool verbose;
  string inputFilename;
  string gnudump;
  string outputfilename;
  string solverProperties;
  string strSolver;
  string loadLookup;
  bool initialGuess;
  bool initialGuessOdometry;
  bool marginalize;
  bool listTypes;
  bool listSolvers;
  bool listRobustKernels;
  bool incremental;
  bool guiOut;
  int gaugeId;
  string robustKernel;
  bool computeMarginals;
  bool printSolverProperties;
  double huberWidth;
  double gain;
  int maxIterationsWithGain;
  //double lambdaInit;
  int updateGraphEachN = 10;
  string statsFile;
  string summaryFile;
  bool nonSequential;
  // command line parsing
  std::vector<int> gaugeList;
  CommandArgs arg;
  arg.param("i", maxIterations, 5, "perform n iterations, if negative consider the gain");
  arg.param("gain", gain, 1e-6, "the gain used to stop optimization (default = 1e-6)");
  arg.param("ig",maxIterationsWithGain, std::numeric_limits<int>::max(), "Maximum number of iterations with gain enabled (default: inf)");
  arg.param("v", verbose, false, "verbose output of the optimization process");
  arg.param("guess", initialGuess, false, "initial guess based on spanning tree");
  arg.param("guessOdometry", initialGuessOdometry, false, "initial guess based on odometry");
  arg.param("inc", incremental, false, "run incremetally");
  arg.param("update", updateGraphEachN, 10, "updates after x odometry nodes");
  arg.param("guiout", guiOut, false, "gui output while running incrementally");
  arg.param("marginalize", marginalize, false, "on or off");
  arg.param("printSolverProperties", printSolverProperties, false, "print the properties of the solver");
  arg.param("solverProperties", solverProperties, "", "set the internal properties of a solver,\n\te.g., initialLambda=0.0001,maxTrialsAfterFailure=2");
  arg.param("gnudump", gnudump, "", "dump to gnuplot data file");
  arg.param("robustKernel", robustKernel, "", "use this robust error function");
  arg.param("robustKernelWidth", huberWidth, -1., "width for the robust Kernel (only if robustKernel)");
  arg.param("computeMarginals", computeMarginals, false, "computes the marginal covariances of something. FOR TESTING ONLY");
  arg.param("gaugeId", gaugeId, -1, "force the gauge");
  arg.param("o", outputfilename, "", "output final version of the graph");
  arg.param("solver", strSolver, "gn_var", "specify which solver to use underneat\n\t {gn_var, lm_fix3_2, gn_fix6_3, lm_fix7_3}");
#ifndef G2O_DISABLE_DYNAMIC_LOADING_OF_LIBRARIES
  string dummy;
  arg.param("solverlib", dummy, "", "specify a solver library which will be loaded");
  arg.param("typeslib", dummy, "", "specify a types library which will be loaded");
#endif
  arg.param("stats", statsFile, "", "specify a file for the statistics");
  arg.param("listTypes", listTypes, false, "list the registered types");
  arg.param("listRobustKernels", listRobustKernels, false, "list the registered robust kernels");
  arg.param("listSolvers", listSolvers, false, "list the available solvers");
  arg.param("renameTypes", loadLookup, "", "create a lookup for loading types into other types,\n\t TAG_IN_FILE=INTERNAL_TAG_FOR_TYPE,TAG2=INTERNAL2\n\t e.g., VERTEX_CAM=VERTEX_SE3:EXPMAP");
  arg.param("gaugeList", gaugeList, std::vector<int>(), "set the list of gauges separated by commas without spaces \n  e.g: 1,2,3,4,5 ");
  arg.param("summary", summaryFile, "", "append a summary of this optimization run to the summary file passed as argument");
  arg.paramLeftOver("graph-input", inputFilename, "", "graph file which will be processed", true);
  arg.param("nonSequential", nonSequential, false, "apply the robust kernel only on loop closures and not odometries");
  

  arg.parseArgs(argc, argv);

  if (verbose) {
    cout << "# Used Compiler: " << G2O_CXX_COMPILER << endl;
  }

#ifndef G2O_DISABLE_DYNAMIC_LOADING_OF_LIBRARIES
  // registering all the types from the libraries
  DlWrapper dlTypesWrapper;
  loadStandardTypes(dlTypesWrapper, argc, argv);
  // register all the solvers
  DlWrapper dlSolverWrapper;
  loadStandardSolver(dlSolverWrapper, argc, argv);
#else
  if (verbose)
    cout << "# linked version of g2o" << endl;
#endif

  OptimizationAlgorithmFactory* solverFactory = OptimizationAlgorithmFactory::instance();
  if (listSolvers) {
    solverFactory->listSolvers(cout);
  }

  if (listTypes) {
    Factory::instance()->printRegisteredTypes(cout, true);
  }

  if (listRobustKernels) {
    std::vector<std::string> kernels;
    RobustKernelFactory::instance()->fillKnownKernels(kernels);
    cout << "Robust Kernels:" << endl;
    for (size_t i = 0; i < kernels.size(); ++i) {
      cout << kernels[i] << endl;
    }
  }

  SparseOptimizer optimizer;
  optimizer.setVerbose(verbose);
  optimizer.setForceStopFlag(&hasToStop);

  SparseOptimizerTerminateAction* terminateAction = 0;
  if (maxIterations < 0) {
    cerr << "# setup termination criterion based on the gain of the iteration" << endl;
    maxIterations = maxIterationsWithGain;
    terminateAction = new SparseOptimizerTerminateAction;
    terminateAction->setGainThreshold(gain);
    terminateAction->setMaxIterations(maxIterationsWithGain);
    optimizer.addPostIterationAction(terminateAction);
  }

  // allocating the desired solver + testing whether the solver is okay
  OptimizationAlgorithmProperty solverProperty;
  optimizer.setAlgorithm(solverFactory->construct(strSolver, solverProperty));
  if (! optimizer.solver()) {
    cerr << "Error allocating solver. Allocating \"" << strSolver << "\" failed!" << endl;
    return 0;
  }
  
  if (solverProperties.size() > 0) {
    bool updateStatus = optimizer.solver()->updatePropertiesFromString(solverProperties);
    if (! updateStatus) {
      cerr << "Failure while updating the solver properties from the given string" << endl;
    }
  }
  if (solverProperties.size() > 0 || printSolverProperties) {
    optimizer.solver()->printProperties(cerr);
  }

  // Loading the input data
  if (loadLookup.size() > 0) {
    optimizer.setRenamedTypesFromString(loadLookup);
  }
  if (inputFilename.size() == 0) {
    cerr << "No input data specified" << endl;
    return 0;
  } else if (inputFilename == "-") {
    cerr << "Read input from stdin" << endl;
    if (!optimizer.load(cin)) {
      cerr << "Error loading graph" << endl;
      return 2;
    }
  } else {
    cerr << "Read input from " << inputFilename << endl;
    ifstream ifs(inputFilename.c_str());
    if (!ifs) {
      cerr << "Failed to open file" << endl;
      return 1;
    }
    if (!optimizer.load(ifs)) {
      cerr << "Error loading graph" << endl;
      return 2;
    }
  }
  cerr << "Loaded " << optimizer.vertices().size() << " vertices" << endl;
  cerr << "Loaded " << optimizer.edges().size() << " edges" << endl;

  if (optimizer.vertices().size() == 0) {
    cerr << "Graph contains no vertices" << endl;
    return 1;
  }

  set<int> vertexDimensions = optimizer.dimensions();
  if (! optimizer.isSolverSuitable(solverProperty, vertexDimensions)) {
    cerr << "The selected solver is not suitable for optimizing the given graph" << endl;
    return 3;
  }
  assert (optimizer.solver());
  //optimizer.setMethod(str2method(strMethod));
  //optimizer.setUserLambdaInit(lambdaInit);

 
  // check for vertices to fix to remove DoF
  bool gaugeFreedom = optimizer.gaugeFreedom();
  OptimizableGraph::Vertex* gauge=0;
  if (gaugeList.size()){
    cerr << "Fixing gauges: ";
    for (size_t i=0; i<gaugeList.size(); i++){
      int id=gaugeList[i];
      OptimizableGraph::Vertex* v=optimizer.vertex(id);
      if (!v){
        cerr << "fatal, not found the vertex of id " << id << " in the gaugeList. Aborting";
        return -1;
      } else {
        if (i==0)
          gauge = v;
        cerr << v->id() << " ";
        v->setFixed(true);
      }
    }
    cerr << endl;
    gaugeFreedom = false;
  } else {
      gauge=optimizer.findGauge();
  }
  if (gaugeFreedom) {
    if (! gauge) {
      cerr <<  "# cannot find a vertex to fix in this thing" << endl;
      return 2;
    } else {
      cerr << "# graph is fixed by node " << gauge->id() << endl;
      gauge->setFixed(true);
    }
  } else {
    cerr << "# graph is fixed by priors or already fixed vertex" << endl;
  }

  // if the solver uses the Schur complement, we want to marginalize the landmarks...
  if (marginalize || solverProperty.requiresMarginalize) {
    int maxDim = *vertexDimensions.rbegin();
    int minDim = *vertexDimensions.begin();
    if (maxDim != minDim) {
      cerr << "# Preparing Marginalization of the Landmarks ... ";
      for (HyperGraph::VertexIDMap::iterator it=optimizer.vertices().begin(); it!=optimizer.vertices().end(); it++){
        OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(it->second);
        if (v->dimension() != maxDim) {
          v->setMarginalized(true);
        }
      }
      cerr << "done." << endl;
    }
  }

  if (robustKernel.size() > 0) {
    AbstractRobustKernelCreator* creator = RobustKernelFactory::instance()->creator(robustKernel);
    cerr << "# Preparing robust error function ... ";
    if (creator) {
      if (nonSequential) {
        for (SparseOptimizer::EdgeSet::iterator it = optimizer.edges().begin(); it != optimizer.edges().end(); ++it) {
          SparseOptimizer::Edge* e = dynamic_cast<SparseOptimizer::Edge*>(*it);
          if (e->vertices().size() >= 2 && std::abs(e->vertex(0)->id() - e->vertex(1)->id()) != 1) {
            e->setRobustKernel(creator->construct());
            if (huberWidth > 0)
              e->robustKernel()->setDelta(huberWidth);
          }
        }
      } else {
        for (SparseOptimizer::EdgeSet::iterator it = optimizer.edges().begin(); it != optimizer.edges().end(); ++it) {
          SparseOptimizer::Edge* e = dynamic_cast<SparseOptimizer::Edge*>(*it);
          e->setRobustKernel(creator->construct());
          if (huberWidth > 0)
            e->robustKernel()->setDelta(huberWidth);
        }
      }
      cerr << "done." << endl;
    } else {
      cerr << "Unknown Robust Kernel: " << robustKernel << endl;
    }
  }

  // sanity check
  HyperDijkstra d(&optimizer);
  UniformCostFunction f;
  d.shortestPaths(gauge,&f);
  //cerr << PVAR(d.visited().size()) << endl;

  if (d.visited().size()!=optimizer.vertices().size()) {
    cerr << CL_RED("Warning: d.visited().size() != optimizer.vertices().size()") << endl;
    cerr << "visited: " << d.visited().size() << endl;
    cerr << "vertices: " << optimizer.vertices().size() << endl;
  }

  if (incremental) {
    cerr << CL_RED("# Note: this variant performs batch steps in each time step") << endl;
    cerr << CL_RED("#       For a variant which updates the Cholesky factor use the binary g2o_incremental") << endl;
    int incIterations = maxIterations;
    if (! arg.parsedParam("i")) {
      cerr << "# Setting default number of iterations" << endl;
      incIterations = 1;
    }
    int updateDisplayEveryN = updateGraphEachN;
    int maxDim = 0;

    cerr << "# incremental settings" << endl;
    cerr << "#\t solve every " << updateGraphEachN << endl;
    cerr << "#\t iterations  " << incIterations << endl;

    SparseOptimizer::VertexIDMap vertices = optimizer.vertices();
    for (SparseOptimizer::VertexIDMap::const_iterator it = vertices.begin(); it != vertices.end(); ++it) {
      const SparseOptimizer::Vertex* v = static_cast<const SparseOptimizer::Vertex*>(it->second);
      maxDim = max(maxDim, v->dimension());
    }

    vector<SparseOptimizer::Edge*> edges;
    for (SparseOptimizer::EdgeSet::iterator it = optimizer.edges().begin(); it != optimizer.edges().end(); ++it) {
      SparseOptimizer::Edge* e = dynamic_cast<SparseOptimizer::Edge*>(*it);
      edges.push_back(e);
    }
    optimizer.edges().clear();
    optimizer.vertices().clear();
    optimizer.setVerbose(false);

    // sort the edges in a way that inserting them makes sense
    sort(edges.begin(), edges.end(), IncrementalEdgesCompare());
    
    double cumTime = 0.;
    int vertexCount=0;
    int lastOptimizedVertexCount = 0;
    int lastVisUpdateVertexCount = 0;
    bool freshlyOptimized=false;
    bool firstRound = true;
    HyperGraph::VertexSet verticesAdded;
    HyperGraph::EdgeSet edgesAdded;
    for (vector<SparseOptimizer::Edge*>::iterator it = edges.begin(); it != edges.end(); ++it) {
      SparseOptimizer::Edge* e = *it;

      int doInit = 0;
      SparseOptimizer::Vertex* v1 = optimizer.vertex(e->vertices()[0]->id());
      SparseOptimizer::Vertex* v2 = optimizer.vertex(e->vertices()[1]->id());

      if (! v1) {
        SparseOptimizer::Vertex* v = v1 = dynamic_cast<SparseOptimizer::Vertex*>(e->vertices()[0]);
        bool v1Added = optimizer.addVertex(v);
        //cerr << "adding" << v->id() << "(" << v->dimension() << ")" << endl;
        assert(v1Added);
        if (! v1Added)
          cerr << "Error adding vertex " << v->id() << endl;
        else
          verticesAdded.insert(v);
        doInit = 1;
        if (v->dimension() == maxDim)
          vertexCount++;
      }

      if (! v2) {
        SparseOptimizer::Vertex* v = v2 = dynamic_cast<SparseOptimizer::Vertex*>(e->vertices()[1]);
        bool v2Added = optimizer.addVertex(v);
        //cerr << "adding" << v->id() << "(" << v->dimension() << ")" << endl;
        assert(v2Added);
        if (! v2Added)
          cerr << "Error adding vertex " << v->id() << endl;
        else
          verticesAdded.insert(v);
        doInit = 2;
        if (v->dimension() == maxDim)
          vertexCount++;
      }

      // adding the edge and initialization of the vertices
      {
        //cerr << " adding edge " << e->vertices()[0]->id() <<  " " << e->vertices()[1]->id() << endl;
        if (! optimizer.addEdge(e)) {
          cerr << "Unable to add edge " << e->vertices()[0]->id() << " -> " << e->vertices()[1]->id() << endl;
        } else {
          edgesAdded.insert(e);
        }

        if (doInit) {
          OptimizableGraph::Vertex* from = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
          OptimizableGraph::Vertex* to   = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
          switch (doInit){
            case 1: // initialize v1 from v2
              {
                HyperGraph::VertexSet toSet;
                toSet.insert(to);
                if (e->initialEstimatePossible(toSet, from) > 0.) {
                  //cerr << "init: " 
                    //<< to->id() << "(" << to->dimension() << ") -> " 
                    //<< from->id() << "(" << from->dimension() << ") " << endl;
                   e->initialEstimate(toSet, from);
                } else {
                  assert(0 && "Added unitialized variable to the graph");
                }
                break;
              }
            case 2: 
              {
                HyperGraph::VertexSet fromSet;
                fromSet.insert(from);
                if (e->initialEstimatePossible(fromSet, to) > 0.) {
                  //cerr << "init: " 
                    //<< from->id() << "(" << from->dimension() << ") -> " 
                    //<< to->id() << "(" << to->dimension() << ") " << endl;
                  e->initialEstimate(fromSet, to);  
                } else {
                  assert(0 && "Added unitialized variable to the graph");
                }
                break;
              }
            default: cerr << "doInit wrong value\n"; 
          }

        }

      }

      freshlyOptimized=false;
      {
        //cerr << "Optimize" << endl;
        if (vertexCount - lastOptimizedVertexCount >= updateGraphEachN) {
          if (firstRound) {
            if (!optimizer.initializeOptimization()){
              cerr << "initialization failed" << endl;
              return 0;
            }
          } else {
            if (! optimizer.updateInitialization(verticesAdded, edgesAdded)) {
              cerr << "updating initialization failed" << endl;
              return 0;
            }
          }
          verticesAdded.clear();
          edgesAdded.clear();
          double ts = get_monotonic_time();
          int currentIt=optimizer.optimize(incIterations, !firstRound);
          double dts = get_monotonic_time() - ts;
          cumTime += dts;
          firstRound = false;
          //optimizer->setOptimizationTime(cumTime);
          if (verbose) {
            double chi2 = optimizer.chi2();
            cerr << "nodes= " << optimizer.vertices().size() << "\t edges= " << optimizer.edges().size() << "\t chi2= " << chi2
              << "\t time= " << dts << "\t iterations= " << currentIt <<  "\t cumTime= " << cumTime << endl;
          }
          lastOptimizedVertexCount = vertexCount;
          freshlyOptimized = true;

          if (guiOut) {
            if (vertexCount - lastVisUpdateVertexCount >= updateDisplayEveryN) {
              dumpEdges(cout, optimizer);
              lastVisUpdateVertexCount = vertexCount;
            }
          }

        }

        if (! verbose)
          cerr << ".";
      }
      
    } // for all edges

    if (! freshlyOptimized) {
      double ts = get_monotonic_time();
      int currentIt=optimizer.optimize(incIterations, !firstRound);
      double dts = get_monotonic_time() - ts;
      cumTime += dts;
      //optimizer->setOptimizationTime(cumTime);
      if (verbose) {
        double chi2 = optimizer.chi2();
        cerr << "nodes= " << optimizer.vertices().size() << "\t edges= " << optimizer.edges().size() << "\t chi2= " << chi2
          << "\t time= " << dts << "\t iterations= " << currentIt <<  "\t cumTime= " << cumTime << endl;
      }

    }

  } else {

    // BATCH optimization

    if (statsFile!=""){
      // allocate buffer for statistics;
      optimizer.setComputeBatchStatistics(true);
    }
    optimizer.initializeOptimization();
    optimizer.computeActiveErrors();
    double loadChi = optimizer.chi2();
    cerr << "Initial chi2 = " << FIXED(loadChi) << endl;

    if (initialGuess) {
      optimizer.computeInitialGuess();
    } else if (initialGuessOdometry) {
      EstimatePropagatorCostOdometry costFunction(&optimizer);
      optimizer.computeInitialGuess(costFunction);
    }
    double initChi = optimizer.chi2();

    signal(SIGINT, sigquit_handler);
    int result=optimizer.optimize(maxIterations);
    if (maxIterations > 0 && result==OptimizationAlgorithm::Fail){
      cerr << "Cholesky failed, result might be invalid" << endl;
    } else if (computeMarginals){
      std::vector<std::pair<int, int> > blockIndices;
      for (size_t i=0; i<optimizer.activeVertices().size(); i++){
        OptimizableGraph::Vertex* v=optimizer.activeVertices()[i];
        if (v->hessianIndex()>=0){
          blockIndices.push_back(make_pair(v->hessianIndex(), v->hessianIndex()));
        }
        if (v->hessianIndex()>0){
          blockIndices.push_back(make_pair(v->hessianIndex()-1, v->hessianIndex()));
        }
      }
      SparseBlockMatrix<MatrixXd> spinv;
      if (optimizer.computeMarginals(spinv, blockIndices)) {
        for (size_t i=0; i<optimizer.activeVertices().size(); i++){
          OptimizableGraph::Vertex* v=optimizer.activeVertices()[i];
          cerr << "Vertex id:" << v->id() << endl;
          if (v->hessianIndex()>=0){
            cerr << "inv block :" << v->hessianIndex() << ", " << v->hessianIndex()<< endl;
            cerr << *(spinv.block(v->hessianIndex(), v->hessianIndex()));
            cerr << endl;
          }
          if (v->hessianIndex()>0){
            cerr << "inv block :" << v->hessianIndex()-1 << ", " << v->hessianIndex()<< endl;
            cerr << *(spinv.block(v->hessianIndex()-1, v->hessianIndex()));
            cerr << endl;
          }
        }
      }
    }
    
    optimizer.computeActiveErrors();
    double finalChi=optimizer.chi2();

    if  (summaryFile!="") {
      PropertyMap summary;
      summary.makeProperty<StringProperty>("filename", inputFilename);
      summary.makeProperty<IntProperty>("n_vertices", optimizer.vertices().size());
      summary.makeProperty<IntProperty>("n_edges", optimizer.edges().size());
      
      int nLandmarks=0;
      int nPoses=0;
      int maxDim = *vertexDimensions.rbegin();
      for (HyperGraph::VertexIDMap::iterator it=optimizer.vertices().begin(); it!=optimizer.vertices().end(); it++){
	OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(it->second);
	if (v->dimension() != maxDim) {
	  nLandmarks++;
	} else
	  nPoses++;
      }
      set<string> edgeTypes;
      for (HyperGraph::EdgeSet::iterator it=optimizer.edges().begin(); it!=optimizer.edges().end(); it++){
	edgeTypes.insert(Factory::instance()->tag(*it));
      }
      stringstream edgeTypesString;
      for (std::set<string>::iterator it=edgeTypes.begin(); it!=edgeTypes.end(); it++){
	edgeTypesString << *it << " ";
      }

      summary.makeProperty<IntProperty>("n_poses", nPoses);
      summary.makeProperty<IntProperty>("n_landmarks", nLandmarks);
      summary.makeProperty<StringProperty>("edge_types", edgeTypesString.str());
      summary.makeProperty<DoubleProperty>("load_chi", loadChi);
      summary.makeProperty<StringProperty>("solver", strSolver);
      summary.makeProperty<BoolProperty>("robustKernel", robustKernel.size() > 0);
      summary.makeProperty<DoubleProperty>("init_chi", initChi);
      summary.makeProperty<DoubleProperty>("final_chi", finalChi);
      summary.makeProperty<IntProperty>("maxIterations", maxIterations);
      summary.makeProperty<IntProperty>("realIterations", result);
      ofstream os;
      os.open(summaryFile.c_str(), ios::app);
      summary.writeToCSV(os);
    }
    

    if (statsFile!=""){
      cerr << "writing stats to file \"" << statsFile << "\" ... ";
      ofstream os(statsFile.c_str());
      const BatchStatisticsContainer& bsc = optimizer.batchStatistics();
      
      for (int i=0; i<maxIterations; i++) {
        os << bsc[i] << endl;
      }
      cerr << "done." << endl;
    }

  }

  // saving again
  if (gnudump.size() > 0) {
    bool gnuPlotStatus = saveGnuplot(gnudump, optimizer);
    if (! gnuPlotStatus) {
      cerr << "Error while writing gnuplot files" << endl;
    }
  }

  if (outputfilename.size() > 0) {
    if (outputfilename == "-") {
      cerr << "saving to stdout";
      optimizer.save(cout);
    } else {
      cerr << "saving " << outputfilename << " ... ";
      optimizer.save(outputfilename.c_str());
    }
    cerr << "done." << endl;
  }

  // destroy all the singletons
  //Factory::destroy();
  //OptimizationAlgorithmFactory::destroy();
  //HyperGraphActionLibrary::destroy();

  return 0;
}
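The robust-kernel section above goes through RobustKernelFactory so the kernel can be picked by name on the command line. When the kernel type is known at compile time, the same effect can be obtained directly; a minimal sketch assuming the Huber kernel shipped with g2o:

#include "g2o/core/sparse_optimizer.h"
#include "g2o/core/robust_kernel_impl.h"

// Hedged sketch: attach a Huber kernel of the given width to every edge.
void addHuberKernels(g2o::SparseOptimizer& optimizer, double width)
{
  for (g2o::SparseOptimizer::EdgeSet::iterator it = optimizer.edges().begin();
       it != optimizer.edges().end(); ++it) {
    g2o::SparseOptimizer::Edge* e = static_cast<g2o::SparseOptimizer::Edge*>(*it);
    g2o::RobustKernelHuber* kernel = new g2o::RobustKernelHuber;
    kernel->setDelta(width);
    e->setRobustKernel(kernel);
  }
}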
Example No. 5
int main(int argc, char **argv)
{
  int num_points = 0;

  // check for arg, # of points to use in projection SBA
  if (argc > 1)
    num_points = atoi(argv[1]);

  double euc_noise = 0.1;      // noise in position, m
  double pix_noise = 1.0;       // pixel noise
  //  double outlier_ratio = 0.1;


  SparseOptimizer optimizer;
  optimizer.setVerbose(false);

  // variable-size block solver
  BlockSolverX::LinearSolverType * linearSolver
      = new LinearSolverCSparse<g2o::BlockSolverX::PoseMatrixType>();

  BlockSolverX * solver_ptr
      = new BlockSolverX(linearSolver);

  g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr);

  optimizer.setAlgorithm(solver);

  vector<Vector3d> true_points;
  for (size_t i=0;i<1000; ++i)
  {
    true_points.push_back(Vector3d((Sample::uniform()-0.5)*3,
                                   Sample::uniform()-0.5,
                                   Sample::uniform()+10));
  }


  // set up camera params
  Vector2d focal_length(500,500); // pixels
  Vector2d principal_point(320,240); // 640x480 image
  double baseline = 0.075;      // 7.5 cm baseline

  // set up camera params and projection matrices on vertices
  g2o::VertexSCam::setKcam(focal_length[0],focal_length[1],
                           principal_point[0],principal_point[1],
                           baseline);


  // set up two poses
  int vertex_id = 0;
  for (size_t i=0; i<2; ++i)
  {
    // set up rotation and translation for this node
    Vector3d t(0,0,i);
    Quaterniond q;
    q.setIdentity();

    Eigen::Isometry3d cam;           // camera pose
    cam = q;
    cam.translation() = t;

    // set up node
    VertexSCam *vc = new VertexSCam();
    vc->setEstimate(cam);
    vc->setId(vertex_id);      // vertex id

    cerr << t.transpose() << " | " << q.coeffs().transpose() << endl;

    // set first cam pose fixed
    if (i==0)
      vc->setFixed(true);

    // make sure projection matrices are set
    vc->setAll();

    // add to optimizer
    optimizer.addVertex(vc);

    vertex_id++;                
  }

  // set up point matches for GICP
  for (size_t i=0; i<true_points.size(); ++i)
  {
    // get two poses
    VertexSE3* vp0 = 
      dynamic_cast<VertexSE3*>(optimizer.vertices().find(0)->second);
    VertexSE3* vp1 = 
      dynamic_cast<VertexSE3*>(optimizer.vertices().find(1)->second);

    // calculate the relative 3D position of the point
    Vector3d pt0,pt1;
    pt0 = vp0->estimate().inverse() * true_points[i];
    pt1 = vp1->estimate().inverse() * true_points[i];

    // add in noise
    pt0 += Vector3d(Sample::gaussian(euc_noise ),
                    Sample::gaussian(euc_noise ),
                    Sample::gaussian(euc_noise ));

    pt1 += Vector3d(Sample::gaussian(euc_noise ),
                    Sample::gaussian(euc_noise ),
                    Sample::gaussian(euc_noise ));

    // form edge, with normals in various positions
    Vector3d nm0, nm1;
    nm0 << 0, i, 1;
    nm1 << 0, i, 1;
    nm0.normalize();
    nm1.normalize();

    Edge_V_V_GICP * e           // new edge with correct cohort for caching
        = new Edge_V_V_GICP(); 

    e->vertices()[0]            // first viewpoint
      = dynamic_cast<OptimizableGraph::Vertex*>(vp0);

    e->vertices()[1]            // second viewpoint
      = dynamic_cast<OptimizableGraph::Vertex*>(vp1);

    EdgeGICP meas;

    meas.pos0 = pt0;
    meas.pos1 = pt1;
    meas.normal0 = nm0;
    meas.normal1 = nm1;
    e->setMeasurement(meas);
    meas = e->measurement();
    //        e->inverseMeasurement().pos() = -kp;

    // use this for point-plane
    e->information() = meas.prec0(0.01);

    // use this for point-point 
    //    e->information().setIdentity();

    //    e->setRobustKernel(true);
    //e->setHuberWidth(0.01);

    optimizer.addEdge(e);
  }

  // set up SBA projections with some number of points

  true_points.clear();
  for (int i=0;i<num_points; ++i)
  {
    true_points.push_back(Vector3d((Sample::uniform()-0.5)*3,
                                   Sample::uniform()-0.5,
                                   Sample::uniform()+10));
  }


  // add point projections to this vertex
  for (size_t i=0; i<true_points.size(); ++i)
  {
    g2o::VertexSBAPointXYZ * v_p
        = new g2o::VertexSBAPointXYZ();


    v_p->setId(vertex_id++);
    v_p->setMarginalized(true);
    v_p->setEstimate(true_points.at(i)
        + Vector3d(Sample::gaussian(1),
                   Sample::gaussian(1),
                   Sample::gaussian(1)));

    optimizer.addVertex(v_p);

    for (size_t j=0; j<2; ++j)
      {
        Vector3d z;
        dynamic_cast<g2o::VertexSCam*>
          (optimizer.vertices().find(j)->second)
          ->mapPoint(z,true_points.at(i));

        if (z[0]>=0 && z[1]>=0 && z[0]<640 && z[1]<480)
        {
          z += Vector3d(Sample::gaussian(pix_noise),
                        Sample::gaussian(pix_noise),
                        Sample::gaussian(pix_noise/16.0));

          g2o::Edge_XYZ_VSC * e
              = new g2o::Edge_XYZ_VSC();

          e->vertices()[0]
              = dynamic_cast<g2o::OptimizableGraph::Vertex*>(v_p);

          e->vertices()[1]
              = dynamic_cast<g2o::OptimizableGraph::Vertex*>
              (optimizer.vertices().find(j)->second);

          e->setMeasurement(z);
          //e->inverseMeasurement() = -z;
          e->information() = Matrix3d::Identity();

          //e->setRobustKernel(false);
          //e->setHuberWidth(1);

          optimizer.addEdge(e);
        }

      }
  } // done with adding projection points



  // move second cam off of its true position
  VertexSE3* vc = 
    dynamic_cast<VertexSE3*>(optimizer.vertices().find(1)->second);
  Eigen::Isometry3d cam = vc->estimate();
  cam.translation() = Vector3d(-0.1,0.1,0.2);
  vc->setEstimate(cam);
  optimizer.initializeOptimization();
  optimizer.computeActiveErrors();
  cout << "Initial chi2 = " << FIXED(optimizer.chi2()) << endl;

  optimizer.setVerbose(true);

  optimizer.optimize(20);

  cout << endl << "Second vertex should be near 0,0,1" << endl;
  cout <<  dynamic_cast<VertexSE3*>(optimizer.vertices().find(0)->second)
    ->estimate().translation().transpose() << endl;
  cout <<  dynamic_cast<VertexSE3*>(optimizer.vertices().find(1)->second)
    ->estimate().translation().transpose() << endl;
}
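VertexSCam::mapPoint projects a 3D point using the intrinsics registered through setKcam. A minimal sketch of the standard pinhole-plus-stereo model this corresponds to (an assumption; the third component is taken to be the horizontal coordinate in the right camera):

#include <Eigen/Core>

// Hedged sketch: z = (u, v, u_right) for a point expressed in the camera frame.
Eigen::Vector3d projectStereo(const Eigen::Vector3d& p_cam,
                              double fx, double fy, double cx, double cy,
                              double baseline)
{
  const double invZ = 1.0 / p_cam.z();
  const double u      = fx * p_cam.x() * invZ + cx;
  const double v      = fy * p_cam.y() * invZ + cy;
  const double uRight = fx * (p_cam.x() - baseline) * invZ + cx;
  return Eigen::Vector3d(u, v, uRight);
}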
Example No. 6
void Optimizer::optimizeUseG2O()
{


    // create the linear solver
    BlockSolverX::LinearSolverType * linearSolver = new LinearSolverCSparse<BlockSolverX::PoseMatrixType>();

    // create the block solver on top of the linear solver
    BlockSolverX* blockSolver = new BlockSolverX(linearSolver);

    // create the algorithm to carry out the optimization
    //OptimizationAlgorithmGaussNewton* optimizationAlgorithm = new OptimizationAlgorithmGaussNewton(blockSolver);
    OptimizationAlgorithmLevenberg* optimizationAlgorithm = new OptimizationAlgorithmLevenberg(blockSolver);

    // NOTE: we skip fixing a variable here; either this is stored in the file
    // itself or Levenberg will handle it.

    // create the optimizer to load the data and carry out the optimization
    SparseOptimizer optimizer;
    SparseOptimizer::initMultiThreading();
    optimizer.setVerbose(true);
    optimizer.setAlgorithm(optimizationAlgorithm);

    {
        pcl::ScopeTime time("G2O setup Graph vertices");
        for (size_t cloud_count = 0; cloud_count < m_pointClouds.size(); ++cloud_count)
        {
            VertexSE3 *vertex = new VertexSE3;
            vertex->setId(cloud_count);
            Isometry3D affine = Isometry3D::Identity();
            affine.linear() = m_pointClouds[cloud_count]->sensor_orientation_.toRotationMatrix().cast<Isometry3D::Scalar>();
            affine.translation() = m_pointClouds[cloud_count]->sensor_origin_.block<3, 1>(0, 0).cast<Isometry3D::Scalar>();
            vertex->setEstimate(affine);
            optimizer.addVertex(vertex);
        }
        optimizer.vertex(0)->setFixed(true);
    }

    {
        pcl::ScopeTime time("G2O setup Graph edges");
        double trans_noise = 0.5, rot_noise = 0.5235;
        EdgeSE3::InformationType information = EdgeSE3::InformationType::Zero();
        information.block<3, 3>(0, 0) << trans_noise * trans_noise, 0, 0,
                                         0, trans_noise * trans_noise, 0,
                                         0, 0, trans_noise * trans_noise;
        information.block<3, 3>(3, 3) << rot_noise * rot_noise, 0, 0,
                                         0, rot_noise * rot_noise, 0,
                                         0, 0, rot_noise * rot_noise;
        for (size_t pair_count = 0; pair_count < m_cloudPairs.size(); ++pair_count)
        {
            CloudPair pair = m_cloudPairs[pair_count];
            int from = pair.corresIdx.first;
            int to = pair.corresIdx.second;
            EdgeSE3 *edge = new EdgeSE3;
            edge->vertices()[0] = optimizer.vertex(from);
            edge->vertices()[1] = optimizer.vertex(to);

            Eigen::Matrix<double, 6, 6> ATA = Eigen::Matrix<double, 6, 6>::Zero();
            Eigen::Matrix<double, 6, 1> ATb = Eigen::Matrix<double, 6, 1>::Zero();
#pragma unroll 8
            for (size_t point_count = 0; point_count < pair.corresPointIdx.size(); ++point_count) {
                int point_p = pair.corresPointIdx[point_count].first;
                int point_q = pair.corresPointIdx[point_count].second;
                PointType P = m_pointClouds[from]->points[point_p];
                PointType Q = m_pointClouds[to]->points[point_q];

                Eigen::Vector3d p = P.getVector3fMap().cast<double>();
                Eigen::Vector3d q = Q.getVector3fMap().cast<double>();
                Eigen::Vector3d Np = P.getNormalVector3fMap().cast<double>();

                double b = (p - q).dot(Np);

                Eigen::Matrix<double, 6, 1> A_p;
                A_p.block<3, 1>(0, 0) = p.cross(Np);
                A_p.block<3, 1>(3, 0) = Np;

                ATA += A_p * A_p.transpose();
                ATb += A_p * b;
            }

            Eigen::Matrix<double, 6, 1> X = ATA.ldlt().solve(ATb);
            Isometry3D measure = Isometry3D::Identity();
            double beta = X[0];
            double gamma = X[1];
            double alpha = X[2];
            measure.linear() = (Eigen::Matrix3d)Eigen::AngleAxisd(alpha, Eigen::Vector3d::UnitZ()) *
                Eigen::AngleAxisd(gamma, Eigen::Vector3d::UnitY()) *
                Eigen::AngleAxisd(beta, Eigen::Vector3d::UnitX());
            measure.translation() = X.block<3, 1>(3, 0);

            edge->setMeasurement(measure);
            edge->setInformation(information);

            optimizer.addEdge(edge);
        }
    }

    optimizer.save("debug_preOpt.g2o");
    {
        pcl::ScopeTime time("g2o optimizing");
        optimizer.initializeOptimization();
        optimizer.optimize(30);
    }
    optimizer.save("debug_postOpt.g2o");

    for (size_t cloud_count = 0; cloud_count < m_pointClouds.size(); ++cloud_count)
    {
        CloudTypePtr tmp(new CloudType);
        Isometry3D trans = static_cast<VertexSE3*>(optimizer.vertices()[cloud_count])->estimate();
        Eigen::Affine3d affine;
        affine.linear() = trans.rotation();
        affine.translation() = trans.translation();
        pcl::transformPointCloudWithNormals(*m_pointClouds[cloud_count], *tmp, affine.cast<float>());
        pcl::copyPointCloud(*tmp, *m_pointClouds[cloud_count]);
    }

    PCL_WARN("Opitimization DONE!!!!\n");

    if (m_params.saveDirectory.length()) {
        if (boost::filesystem::exists(m_params.saveDirectory) && !boost::filesystem::is_directory(m_params.saveDirectory)) {
            boost::filesystem::remove(m_params.saveDirectory);
        }
        boost::filesystem::create_directories(m_params.saveDirectory);

        char filename[1024] = { 0 };
        for (size_t i = 0; i < m_pointClouds.size(); ++i) {
            sprintf(filename, "%s/cloud_%d.ply", m_params.saveDirectory.c_str(), static_cast<int>(i));
            pcl::io::savePLYFileBinary(filename, *m_pointClouds[i]);
        }
    }
}
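The inner loop accumulates a point-to-plane linearization: each correspondence (p, q, n_p) contributes the residual (p - q) . n_p with Jacobian row [p x n_p, n_p], and the solved 6-vector X = (rot_x, rot_y, rot_z, t) becomes the EdgeSE3 measurement. The same accumulation as a standalone function, mirroring the loop above (a sketch, not the project's own helper):

#include <Eigen/Dense>
#include <vector>

Eigen::Matrix<double, 6, 1> pointToPlaneStep(
    const std::vector<Eigen::Vector3d>& p,
    const std::vector<Eigen::Vector3d>& q,
    const std::vector<Eigen::Vector3d>& n)
{
  Eigen::Matrix<double, 6, 6> ATA = Eigen::Matrix<double, 6, 6>::Zero();
  Eigen::Matrix<double, 6, 1> ATb = Eigen::Matrix<double, 6, 1>::Zero();
  for (size_t i = 0; i < p.size(); ++i) {
    Eigen::Matrix<double, 6, 1> A_p;
    A_p.head<3>() = p[i].cross(n[i]);  // rotational part of the Jacobian row
    A_p.tail<3>() = n[i];              // translational part
    ATA += A_p * A_p.transpose();
    ATb += A_p * (p[i] - q[i]).dot(n[i]);
  }
  return ATA.ldlt().solve(ATb);
}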
Example No. 7
int main(int argc, char** argv)
{
  bool fixLaser;
  int maxIterations;
  bool verbose;
  string inputFilename;
  string outputfilename;
  string rawFilename;
  string odomTestFilename;
  string dumpGraphFilename;
  // command line parsing
  CommandArgs commandLineArguments;
  commandLineArguments.param("i", maxIterations, 10, "perform n iterations");
  commandLineArguments.param("v", verbose, false, "verbose output of the optimization process");
  commandLineArguments.param("o", outputfilename, "", "output final version of the graph");
  commandLineArguments.param("test", odomTestFilename, "", "apply odometry calibration to some test data");
  commandLineArguments.param("dump", dumpGraphFilename, "", "write the graph to the disk");
  commandLineArguments.param("fixLaser", fixLaser, false, "keep the laser offset fixed during optimization");
  commandLineArguments.paramLeftOver("gm2dl-input", inputFilename, "", "gm2dl file which will be processed");
  commandLineArguments.paramLeftOver("raw-log", rawFilename, "", "raw log file containing the odometry");

  commandLineArguments.parseArgs(argc, argv);

  SparseOptimizer optimizer;
  optimizer.setVerbose(verbose);
  optimizer.setForceStopFlag(&hasToStop);

  allocateSolverForSclam(optimizer);

  // loading
  if (! Gm2dlIO::readGm2dl(inputFilename, optimizer, false)) {
    cerr << "Error while loading gm2dl file" << endl;
  }
  DataQueue robotLaserQueue;
  int numLaserOdom = Gm2dlIO::readRobotLaser(rawFilename, robotLaserQueue);
  if (numLaserOdom == 0) {
    cerr << "No raw information read" << endl;
    return 0;
  }
  cerr << "Read " << numLaserOdom << " laser readings from file" << endl;

  bool gaugeFreedom = optimizer.gaugeFreedom();

  OptimizableGraph::Vertex* gauge = optimizer.findGauge();
  if (gaugeFreedom) {
    if (! gauge) {
      cerr <<  "# cannot find a vertex to fix in this thing" << endl;
      return 2;
    } else {
      cerr << "# graph is fixed by node " << gauge->id() << endl;
      gauge->setFixed(true);
    }
  } else {
    cerr << "# graph is fixed by priors" << endl;
  }

  addOdometryCalibLinksDifferential(optimizer, robotLaserQueue);

  // sanity check
  HyperDijkstra d(&optimizer);
  UniformCostFunction f;
  d.shortestPaths(gauge, &f);
  //cerr << PVAR(d.visited().size()) << endl;

  if (d.visited().size()!=optimizer.vertices().size()) {
    cerr << CL_RED("Warning: d.visited().size() != optimizer.vertices().size()") << endl;
    cerr << "visited: " << d.visited().size() << endl;
    cerr << "vertices: " << optimizer.vertices().size() << endl;
    if (1)
      for (SparseOptimizer::VertexIDMap::const_iterator it = optimizer.vertices().begin(); it != optimizer.vertices().end(); ++it) {
        OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
        if (d.visited().count(v) == 0) {
          cerr << "\t unvisited vertex " << it->first << " " << (void*)v << endl;
          v->setFixed(true);
        }
      }
  }

  for (SparseOptimizer::VertexIDMap::const_iterator it = optimizer.vertices().begin(); it != optimizer.vertices().end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
    if (v->fixed()) {
      cerr << "\t fixed vertex " << it->first << endl;
    }
  }

  VertexSE2* laserOffset = dynamic_cast<VertexSE2*>(optimizer.vertex(Gm2dlIO::ID_LASERPOSE));
  VertexOdomDifferentialParams* odomParamsVertex = dynamic_cast<VertexOdomDifferentialParams*>(optimizer.vertex(Gm2dlIO::ID_ODOMCALIB));

  if (fixLaser) {
    cerr << "Fix position of the laser offset" << endl;
    laserOffset->setFixed(true);
  }

  signal(SIGINT, sigquit_handler);
  cerr << "Doing full estimation" << endl;
  optimizer.initializeOptimization();
  optimizer.computeActiveErrors();
  cerr << "Initial chi2 = " << FIXED(optimizer.chi2()) << endl;

  int i=optimizer.optimize(maxIterations);
  if (maxIterations > 0 && !i){
    cerr << "optimize failed, result might be invalid" << endl;
  }

  if (laserOffset) {
    cerr << "Calibrated laser offset (x, y, theta):" << laserOffset->estimate().toVector().transpose() << endl;
  }

  if (odomParamsVertex) {
    cerr << "Odometry parameters (scaling factors (v_l, v_r, b)): " << odomParamsVertex->estimate().transpose() << endl;
  }

  cerr << "vertices: " << optimizer.vertices().size() << endl;
  cerr << "edges: " << optimizer.edges().size() << endl;

  if (dumpGraphFilename.size() > 0) {
    cerr << "Writing " << dumpGraphFilename << " ... ";
    optimizer.save(dumpGraphFilename.c_str());
    cerr << "done." << endl;
  }

  // optional input of a separate file for applying the odometry calibration
  if (odomTestFilename.size() > 0) {

    DataQueue testRobotLaserQueue;
    int numTestOdom = Gm2dlIO::readRobotLaser(odomTestFilename, testRobotLaserQueue);
    if (numTestOdom == 0) {
      cerr << "Unable to read test data" << endl;
    } else {

      ofstream rawStream("odometry_raw.txt");
      ofstream calibratedStream("odometry_calibrated.txt");
      const Vector3d& odomCalib = odomParamsVertex->estimate();
      RobotLaser* prev = dynamic_cast<RobotLaser*>(testRobotLaserQueue.buffer().begin()->second);
      SE2 prevCalibratedPose = prev->odomPose();

      for (DataQueue::Buffer::const_iterator it = testRobotLaserQueue.buffer().begin(); it != testRobotLaserQueue.buffer().end(); ++it) {
        RobotLaser* cur = dynamic_cast<RobotLaser*>(it->second);
        assert(cur);

        double dt = cur->timestamp() - prev->timestamp();
        SE2 motion = prev->odomPose().inverse() * cur->odomPose();

        // convert to velocity measurement
        MotionMeasurement motionMeasurement(motion.translation().x(), motion.translation().y(), motion.rotation().angle(), dt);
        VelocityMeasurement velocityMeasurement = OdomConvert::convertToVelocity(motionMeasurement);

        // apply calibration
        VelocityMeasurement calibratedVelocityMeasurement = velocityMeasurement;
        calibratedVelocityMeasurement.setVl(odomCalib(0) * calibratedVelocityMeasurement.vl());
        calibratedVelocityMeasurement.setVr(odomCalib(1) * calibratedVelocityMeasurement.vr());
        MotionMeasurement mm = OdomConvert::convertToMotion(calibratedVelocityMeasurement, odomCalib(2));

        // combine calibrated odometry with the previous pose
        SE2 remappedOdom;
        remappedOdom.fromVector(mm.measurement());
        SE2 calOdomPose = prevCalibratedPose * remappedOdom;

        // write output
        rawStream << prev->odomPose().translation().x() << " " << prev->odomPose().translation().y() << " " << prev->odomPose().rotation().angle() << endl;
        calibratedStream << calOdomPose.translation().x() << " " << calOdomPose.translation().y() << " " << calOdomPose.rotation().angle() << endl;

        prevCalibratedPose = calOdomPose;
        prev = cur;
      }
    }

  }

  if (outputfilename.size() > 0) {
    Gm2dlIO::updateLaserData(optimizer);
    cerr << "Writing " << outputfilename << " ... ";
    bool writeStatus = Gm2dlIO::writeGm2dl(outputfilename, optimizer);
    cerr << (writeStatus ? "done." : "failed") << endl;
  }

  return 0;
}
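The calibration stored in the VertexOdomDifferentialParams estimate scales the left and right wheel velocities and the baseline before they are converted back to a motion. A minimal sketch of the differential-drive model that such a velocity-to-motion conversion typically uses (an assumption about the model, not necessarily OdomConvert's exact implementation):

#include <cmath>

struct PlanarMotion { double dx, dy, dtheta; };

// Hedged sketch: integrate left/right wheel velocities over dt on a
// differential-drive base with the given wheel separation (baseline).
PlanarMotion velocitiesToMotion(double vl, double vr, double baseline, double dt)
{
  const double v     = 0.5 * (vl + vr);       // forward velocity
  const double omega = (vr - vl) / baseline;  // angular velocity
  const double theta = omega * dt;
  if (std::abs(omega) < 1e-9)
    return {v * dt, 0.0, 0.0};                // straight-line motion
  const double radius = v / omega;            // turning radius
  return {radius * std::sin(theta), radius * (1.0 - std::cos(theta)), theta};
}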