bool MainWindow::load(const QString& filename)
{
  ifstream ifs(filename.toStdString().c_str());
  if (! ifs)
    return false;
  viewer->graph->clear();
  bool loadStatus = viewer->graph->load(ifs);
  if (! loadStatus)
    return false;
  _lastSolver = -1;
  viewer->setUpdateDisplay(true);
  SparseOptimizer* optimizer = viewer->graph;

  // update the solvers which are suitable for this graph
  set<int> vertDims = optimizer->dimensions();
  for (size_t i = 0; i < _knownSolvers.size(); ++i) {
    const OptimizationAlgorithmProperty& sp = _knownSolvers[i];
    if (sp.name == "" && sp.desc == "")
      continue;
    bool suitableSolver = optimizer->isSolverSuitable(sp, vertDims);
    qobject_cast<QStandardItemModel*>(coOptimizer->model())->item(i)->setEnabled(suitableSolver);
  }
  return loadStatus;
}
void MainWindow::setRobustKernel()
{
  SparseOptimizer* optimizer = viewer->graph;
  bool robustKernel = cbRobustKernel->isChecked();
  double huberWidth = leKernelWidth->text().toDouble();

  if (robustKernel) {
    QString strRobustKernel = coRobustKernel->currentText();
    AbstractRobustKernelCreator* creator = RobustKernelFactory::instance()->creator(strRobustKernel.toStdString());
    if (! creator) {
      cerr << strRobustKernel.toStdString() << " is not a valid robust kernel" << endl;
      return;
    }
    for (SparseOptimizer::EdgeSet::const_iterator it = optimizer->edges().begin(); it != optimizer->edges().end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      e->setRobustKernel(creator->construct());
      e->robustKernel()->setDelta(huberWidth);
    }
  } else {
    for (SparseOptimizer::EdgeSet::const_iterator it = optimizer->edges().begin(); it != optimizer->edges().end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      e->setRobustKernel(0);
    }
  }
}
void MainWindow::setRobustKernel()
{
  SparseOptimizer* optimizer = viewer->graph;
  bool robustKernel = cbRobustKernel->isChecked();
  double huberWidth = leKernelWidth->text().toDouble();
  // odometry edges are those whose node ids differ by 1
  bool onlyLoop = cbOnlyLoop->isChecked();

  if (robustKernel) {
    QString strRobustKernel = coRobustKernel->currentText();
    AbstractRobustKernelCreator* creator = RobustKernelFactory::instance()->creator(strRobustKernel.toStdString());
    if (! creator) {
      cerr << strRobustKernel.toStdString() << " is not a valid robust kernel" << endl;
      return;
    }
    for (SparseOptimizer::EdgeSet::const_iterator it = optimizer->edges().begin(); it != optimizer->edges().end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      if (onlyLoop) {
        if (e->vertices().size() >= 2 && std::abs(e->vertex(0)->id() - e->vertex(1)->id()) != 1) {
          e->setRobustKernel(creator->construct());
          e->robustKernel()->setDelta(huberWidth);
        }
      } else {
        e->setRobustKernel(creator->construct());
        e->robustKernel()->setDelta(huberWidth);
      }
    }
  } else {
    for (SparseOptimizer::EdgeSet::const_iterator it = optimizer->edges().begin(); it != optimizer->edges().end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      e->setRobustKernel(0);
    }
  }
}
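The loop-closure test used above (endpoint ids differing by more than one) could be factored into a small helper. A minimal sketch under the id-difference convention stated in the comment; the helper name is ours and not part of the original code:

// Sketch only: treat a binary edge as a loop closure unless it links consecutive vertex ids.
static bool isLoopClosureEdge(const g2o::OptimizableGraph::Edge* e)
{
  return e->vertices().size() >= 2
      && std::abs(e->vertex(0)->id() - e->vertex(1)->id()) != 1;
}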
int main(int argc, char** argv)
{
  QApplication qapp(argc, argv);
#ifdef G2O_HAVE_GLUT
  glutInit(&argc, argv);
#endif

  string dummy;
  string inputFilename;
  CommandArgs arg;
  arg.param("solverlib", dummy, "", "specify a solver library which will be loaded");
  arg.param("typeslib", dummy, "", "specify a types library which will be loaded");
  arg.paramLeftOver("graph-input", inputFilename, "", "graph file which will be processed", true);
  arg.parseArgs(argc, argv);

  // loading the standard solver / types
  DlWrapper dlTypesWrapper;
  loadStandardTypes(dlTypesWrapper, argc, argv);

  // register all the solvers
  DlWrapper dlSolverWrapper;
  loadStandardSolver(dlSolverWrapper, argc, argv);

  MainWindow mw;
  mw.updateDisplayedSolvers();
  mw.show();

  // redirect the output that normally goes to cerr to the textedit in the viewer
  StreamRedirect redirect(cerr, mw.plainTextEdit);

  // setting up the optimizer
  SparseOptimizer* optimizer = new SparseOptimizer();
  mw.viewer->graph = optimizer;

  // set up the GUI action
  GuiHyperGraphAction guiHyperGraphAction;
  guiHyperGraphAction.viewer = mw.viewer;
  optimizer->addPostIterationAction(&guiHyperGraphAction);

  if (inputFilename.size() > 0) {
    mw.loadFromFile(QString::fromStdString(inputFilename));
  }

  while (mw.isVisible()) {
    guiHyperGraphAction.dumpScreenshots = mw.actionDump_Images->isChecked();
    qapp.processEvents();
    SleepThread::msleep(10);
  }

  delete optimizer;

  // destroy all the singletons
  //Factory::destroy();
  //OptimizationAlgorithmFactory::destroy();
  //HyperGraphActionLibrary::destroy();
  return 0;
}
g2o::SparseOptimizer* MapG2OReflector::g2oInit()
{
  // graph construction
  typedef BlockSolver< BlockSolverTraits<-1, -1> > SlamBlockSolver;
  typedef LinearSolverCSparse<SlamBlockSolver::PoseMatrixType> SlamLinearSolver;

  SlamLinearSolver* linearSolver = new SlamLinearSolver();
  linearSolver->setBlockOrdering(false);
  SlamBlockSolver* blockSolver = new SlamBlockSolver(linearSolver);
  OptimizationAlgorithmLevenberg* solverGauss = new OptimizationAlgorithmLevenberg(blockSolver);
  //OptimizationAlgorithmGaussNewton* solverGauss = new OptimizationAlgorithmGaussNewton(blockSolver);

  SparseOptimizer* graph = new SparseOptimizer();
  graph->setAlgorithm(solverGauss);

  g2o::ParameterSE3Offset* imuOffset = new ParameterSE3Offset();
  imuOffset->setOffset(Eigen::Isometry3d::Identity());
  imuOffset->setId(0);
  graph->addParameter(imuOffset);

  return graph;
}
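For reference, a minimal usage sketch of the optimizer returned above: add two SE3 pose vertices and one relative-pose edge, then optimize. The ids, poses, and the reflector instance are hypothetical, and the slam3d types are assumed to be included and registered; only the g2o calls themselves come from the library.

// Sketch: 'reflector' is a hypothetical MapG2OReflector instance.
g2o::SparseOptimizer* graph = reflector.g2oInit();

g2o::VertexSE3* v0 = new g2o::VertexSE3;
v0->setId(0);
v0->setEstimate(Eigen::Isometry3d::Identity());
v0->setFixed(true);                                   // anchor the first pose
graph->addVertex(v0);

g2o::VertexSE3* v1 = new g2o::VertexSE3;
v1->setId(1);
v1->setEstimate(Eigen::Isometry3d::Identity());
graph->addVertex(v1);

g2o::EdgeSE3* e = new g2o::EdgeSE3;                   // relative-pose constraint v0 -> v1
e->setVertex(0, v0);
e->setVertex(1, v1);
Eigen::Isometry3d meas = Eigen::Isometry3d::Identity();
meas.translation() = Eigen::Vector3d(1.0, 0.0, 0.0);  // hypothetical measured motion
e->setMeasurement(meas);
e->setInformation(Eigen::Matrix<double, 6, 6>::Identity());
graph->addEdge(e);

graph->initializeOptimization();
graph->optimize(10);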
inline bool addObservation(Point2d observation, double xFasher, double yFasher, LandmarkType type)
{
  {
    EdgeSE2* e = new EdgeSE2;
    e->vertices()[0] = optimizer.vertex(type);
    e->vertices()[1] = optimizer.vertex(CurrentVertexId);
    switch (type) {
      case RightL:
        observation.y += B2;
        break;
      case FrontL:
        observation.x -= A2;
        break;
      case LeftL:
        observation.y -= B2;
        break;
      case BackL:
        observation.x += A2;
        break;
      default:
        break;
    }
    e->setMeasurement(SE2(observation.x, observation.y, 0));

    Matrix3d information;
    information.fill(0.);
    information(0, 0) = xFasher;
    information(1, 1) = yFasher;
    information(2, 2) = 1;
    e->setInformation(information);

    g2o::RobustKernelCauchy* rk = new g2o::RobustKernelCauchy;
    e->setRobustKernel(rk);
    optimizer.addEdge(e);
  }
  atLeastOneObservation = true;
  return true;
}
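In g2o the information matrix is the inverse of the measurement covariance. Reading xFasher and yFasher as per-axis precisions (the names suggest Fisher information, although the original does not say), the matrix built above is

$$\Omega = \Sigma^{-1} = \begin{pmatrix}\lambda_x & 0 & 0\\ 0 & \lambda_y & 0\\ 0 & 0 & 1\end{pmatrix}, \qquad \lambda_x = \text{xFasher},\ \lambda_y = \text{yFasher},$$

so larger values weigh the corresponding residual component more strongly, while the Cauchy kernel bounds the influence of outlier observations.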
bool MainWindow::prepare()
{
  SparseOptimizer* optimizer = viewer->graph;
  if (_currentOptimizationAlgorithmProperty.requiresMarginalize) {
    cerr << "Marginalizing Landmarks" << endl;
    for (SparseOptimizer::VertexIDMap::const_iterator it = optimizer->vertices().begin(); it != optimizer->vertices().end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
      int vdim = v->dimension();
      v->setMarginalized(vdim == _currentOptimizationAlgorithmProperty.landmarkDim);
    }
  } else {
    cerr << "Preparing (no marginalization of Landmarks)" << endl;
    for (SparseOptimizer::VertexIDMap::const_iterator it = optimizer->vertices().begin(); it != optimizer->vertices().end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
      v->setMarginalized(false);
    }
  }
  viewer->graph->initializeOptimization();
  return true;
}
int RunG2OViewer::run(int argc, char** argv, CommandArgs& arg)
{
  std::string inputFilename;
  arg.paramLeftOver("graph-input", inputFilename, "", "graph file which will be processed", true);
  arg.parseArgs(argc, argv);

  MainWindow mw;
  mw.updateDisplayedSolvers();
  mw.updateRobustKernels();
  mw.show();

  // redirect the output that normally goes to cerr to the textedit in the viewer
  StreamRedirect redirect(std::cerr, mw.plainTextEdit);

  // setting up the optimizer
  SparseOptimizer* optimizer = new SparseOptimizer();
  mw.viewer->graph = optimizer;

  // set up the GUI action
  GuiHyperGraphAction guiHyperGraphAction;
  guiHyperGraphAction.viewer = mw.viewer;
  //optimizer->addPostIterationAction(&guiHyperGraphAction);
  optimizer->addPreIterationAction(&guiHyperGraphAction);

  if (inputFilename.size() > 0) {
    mw.loadFromFile(QString::fromLocal8Bit(inputFilename.c_str()));
  }

  QCoreApplication* myapp = QApplication::instance();
  while (mw.isVisible()) {
    guiHyperGraphAction.dumpScreenshots = mw.actionDump_Images->isChecked();
    if (myapp)
      myapp->processEvents();
    SleepThread::msleep(10);
  }

  delete optimizer;
  return 0;
}
inline void updateVertexIdx()
{
  if ((ros::Time::now() - lastSavedNodeTime).toSec() >= 0.03) {
    nodeCounter++;
    lastSavedNodeTime = ros::Time::now();
    PreviousVertexId = CurrentVertexId;
    CurrentVertexId++;
    if (CurrentVertexId - LandmarkCount >= 100) {
      CurrentVertexId = LandmarkCount;
    }
    {
      VertexSE2* r = new VertexSE2;
      r->setEstimate(Eigen::Vector3d(location.x, location.y, 0));
      r->setFixed(false);
      r->setId(CurrentVertexId);
      if (optimizer.vertex(CurrentVertexId) != NULL) {
        optimizer.removeVertex(optimizer.vertex(CurrentVertexId));
      }
      optimizer.addVertex(r);
    }
    {
      EdgeSE2* e = new EdgeSE2;
      e->vertices()[0] = optimizer.vertex(PreviousVertexId);
      e->vertices()[1] = optimizer.vertex(CurrentVertexId);
      Point2d dead_reck = getOdometryFromLastGet();
      e->setMeasurement(SE2(dead_reck.x, dead_reck.y, 0));
      Matrix3d information;
      information.fill(0.);
      information(0, 0) = 200;
      information(1, 1) = 200;
      information(2, 2) = 1;
      e->setInformation(information);
      optimizer.addEdge(e);
    }
  }
}
inline ~Localization() { optimizer.clear(); delete optimizationAlgorithm; }
int main(int argc, char** argv) { string rawFilename; bool use_viso = false; bool use_cfs = false; double init_x, init_y; cout << endl << "\033[32mA demo implementing the method of stereo-odo calibration.\033[0m" << endl << "* * * Author: \033[32mDoom @ ZJU.\033[0m" << endl << "Usage: sclam_odo_stereo_cal USE_VISO INPUT_FILENAME USE_CLOSEFORM" << endl << endl; if(argc < 2){ cout << "\033[33m[Warning]:No input directory name, using the default one : /home/doom/CSO/data/params.yaml\033[0m" << endl << endl; // Read in our param file CYaml::get().parseParamFile("/home/doom/CSO/data/params.yaml"); // Read in params use_viso = CYaml::get().general()["use_viso"].as<bool>(); rawFilename = CYaml::get().general()["data_folder"].as<std::string>(); use_cfs = CYaml::get().general()["use_closed_form"].as<bool>(); init_x = CYaml::get().general()["init_x"].as<double>(); init_y = CYaml::get().general()["init_y"].as<double>(); } else if(argc == 2){ cout << "\033[31mInput params file directory: \033[0m" << argv[1] << endl << endl; // Read in our param file CYaml::get().parseParamFile(argv[1]); // Read in params use_viso = CYaml::get().general()["use_viso"].as<bool>(); rawFilename = CYaml::get().general()["data_folder"].as<std::string>(); use_cfs = CYaml::get().general()["use_closed_form"].as<bool>(); init_x = CYaml::get().general()["init_x"].as<double>(); init_y = CYaml::get().general()["init_y"].as<double>(); } else { cerr << "\033[31m[FATAL]Bad parameters.\033[0m" << endl; return 1; } // construct a new optimizer SparseOptimizer optimizer; initOptimizer(optimizer); // read the times.txt, to determine how many pics in this directory. stringstream ss; ss << rawFilename << "/votimes.txt"; ifstream in(ss.str().c_str()); int Num = 0; vector<double> timeque; if(in.fail()) { cerr << "\033[1;31m[ERROR]Wrong foldername or no times.txt in this directory.\033[0m" << endl; return 1; } else { string stemp; getline(in, stemp, '\n'); while(in.good()){ Num++; getline(in, stemp, '\n'); } cout << "There are " << Num << " pics in this direcotory." << endl << endl; in.close(); in.open(ss.str().c_str()); double temp; for(int i = 0; i < Num; i++) { in >> temp; timeque.push_back(temp); } in.close(); } // loading odom data cerr << "\033[31mNow loading odometry data.\033[0m" << endl; string odomName = rawFilename + "/newodom.txt"; DataQueue odometryQueue; DataQueue stereovoQueue; SE2 init_offset; int numOdom = Gm2dlIO::readRobotOdom(odomName, odometryQueue); if (numOdom == 0) { cerr << "No raw information read" << endl; return 0; } cerr << "Done...Read " << numOdom << " odom readings from file" << endl << endl; // initial first stereo frame pose SE2 initialStereoPose; bool first = true; int numVo = 0; if(use_viso) { cout << "\033[31mUsing libviso...\033[0m" << rawFilename << endl; RobotOdom* ro = dynamic_cast<RobotOdom*>(odometryQueue.buffer().begin()->second); // init a libviso2 StereoVo viso(rawFilename, Num, ro, timeque); // get initial odometry pose in robot frame and stereo vo. viso.getInitStereoPose(initialStereoPose); numVo = viso.getMotion(stereovoQueue); cerr << "Done...There are " << numVo << " vo datas in the queue." 
<< endl << endl; } else { cerr << "\033[31mLoading pose file...\033[0m" << endl; ss.str(""); ss << rawFilename << "/CameraTrajectory.txt"; in.open(ss.str().c_str()); int numVo = 0; Matrix4D posemat; for(int i = 0; i < Num; i++) { for(int j = 0; j < 4; j++) { for(int k = 0; k < 4; k++) in >> posemat(j, k); } if(first){ init_offset.setTranslation(Eigen::Vector2d(init_x, init_y)); init_offset.setRotation(Eigen::Rotation2Dd::Identity()); // SE2 x(posemat(2,3),-posemat(0,3),acos(posemat(0,0))); // RobotOdom* ro = dynamic_cast<RobotOdom*>(odometryQueue.buffer().begin()->second); initialStereoPose = init_offset; first = false; } // save stereo vo as robotOdom node. RobotOdom* tempVo = new RobotOdom; tempVo->setTimestamp(timeque[i]); SE2 x(posemat(2,3),-posemat(0,3),acos(posemat(0,0))); tempVo->setOdomPose(init_offset*x); stereovoQueue.add(tempVo); numVo++; } in.close(); cerr << "Done...There are " << numVo << " vo datas in the queue." << endl << endl; } // adding the measurements vector<MotionInformation, Eigen::aligned_allocator<MotionInformation> > motions; { std::map<double, RobotData*>::const_iterator it = stereovoQueue.buffer().begin(); std::map<double, RobotData*>::const_iterator prevIt = it++; for (; it != stereovoQueue.buffer().end(); ++it) { MotionInformation mi; RobotOdom* prevVo = dynamic_cast<RobotOdom*>(prevIt->second); RobotOdom* curVo = dynamic_cast<RobotOdom*>(it->second); mi.stereoMotion = prevVo->odomPose().inverse() * curVo->odomPose(); // get the motion of the robot in that time interval RobotOdom* prevOdom = dynamic_cast<RobotOdom*>(odometryQueue.findClosestData(prevVo->timestamp())); RobotOdom* curOdom = dynamic_cast<RobotOdom*>(odometryQueue.findClosestData(curVo->timestamp())); mi.odomMotion = prevOdom->odomPose().inverse() * curOdom->odomPose(); mi.timeInterval = prevOdom->timestamp() - curOdom->timestamp(); prevIt = it; motions.push_back(mi); } } // Construct the graph. cerr << "\033[31mConstruct the graph...\033[0m" << endl; // Vertex offset addVertexSE2(optimizer, initialStereoPose, 0); for(int i = 0; i < motions.size(); i++) { const SE2& odomMotion = motions[i].odomMotion; const SE2& stereoMotion = motions[i].stereoMotion; // add the edge // stereo vo and odom measurement. OdomAndStereoMotion meas; meas.StereoMotion = stereoMotion; meas.odomMotion = odomMotion; addEdgeCalib(optimizer, 0, meas, Eigen::Matrix3d::Identity()); } cerr << "Done..." << endl << endl; // if you want check the component of your graph, uncomment the CHECK_GRAPH #ifdef CHECK_GRAPH for(auto it:optimizer.edges()) { VertexSE2* v = static_cast<VertexSE2*>(it->vertex(0)); cout << v->id() << endl; } #endif // optimize. optimize(optimizer, 10); Eigen::Vector3d result = getEstimation(optimizer); cerr << "\033[1;32mCalibrated stereo offset (x, y, theta):\033[0m" << result[0] << " " << result [1] << " " << result[2] << endl << endl; // If you want see how's its closed form solution, uncomment the CLOSED_FORM if(use_cfs){ cerr << "\033[31mPerforming the closed form solution...\033[0m" << endl; SE2 closedFormStereo; ClosedFormCalibration::calibrate(motions, closedFormStereo); cerr << "\033[1;32mDone... closed form Calibrated stereo offset (x, y, theta):\033[0m" << closedFormStereo.toVector().transpose() << endl; } return 0; }
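One caveat in the pose-file branch above: the yaw is recovered with acos(posemat(0,0)), and since

$$\arccos(\cos\theta) = |\theta| \quad \text{for } \theta \in [-\pi, \pi],$$

the sign of the rotation is lost and negative yaws are folded onto positive ones. A sign-preserving recovery would use atan2 on an off-diagonal entry of the rotation block; the exact indices depend on the camera-to-robot axis convention used here (camera z mapped to x, camera -x to y), so they should be checked against the data.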
int main(int argc, char** argv) { bool fixLaser; int maxIterations; bool verbose; string inputFilename; string outputfilename; string rawFilename; string odomTestFilename; string dumpGraphFilename; // command line parsing CommandArgs commandLineArguments; commandLineArguments.param("i", maxIterations, 10, "perform n iterations"); commandLineArguments.param("v", verbose, false, "verbose output of the optimization process"); commandLineArguments.param("o", outputfilename, "", "output final version of the graph"); commandLineArguments.param("test", odomTestFilename, "", "apply odometry calibration to some test data"); commandLineArguments.param("dump", dumpGraphFilename, "", "write the graph to the disk"); commandLineArguments.param("fixLaser", fixLaser, false, "keep the laser offset fixed during optimization"); commandLineArguments.paramLeftOver("gm2dl-input", inputFilename, "", "gm2dl file which will be processed"); commandLineArguments.paramLeftOver("raw-log", rawFilename, "", "raw log file containing the odometry"); commandLineArguments.parseArgs(argc, argv); SparseOptimizer optimizer; optimizer.setVerbose(verbose); optimizer.setForceStopFlag(&hasToStop); allocateSolverForSclam(optimizer); // loading if (! Gm2dlIO::readGm2dl(inputFilename, optimizer, false)) { cerr << "Error while loading gm2dl file" << endl; } DataQueue robotLaserQueue; int numLaserOdom = Gm2dlIO::readRobotLaser(rawFilename, robotLaserQueue); if (numLaserOdom == 0) { cerr << "No raw information read" << endl; return 0; } cerr << "Read " << numLaserOdom << " laser readings from file" << endl; bool gaugeFreedom = optimizer.gaugeFreedom(); OptimizableGraph::Vertex* gauge = optimizer.findGauge(); if (gaugeFreedom) { if (! gauge) { cerr << "# cannot find a vertex to fix in this thing" << endl; return 2; } else { cerr << "# graph is fixed by node " << gauge->id() << endl; gauge->setFixed(true); } } else { cerr << "# graph is fixed by priors" << endl; } addOdometryCalibLinksDifferential(optimizer, robotLaserQueue); // sanity check HyperDijkstra d(&optimizer); UniformCostFunction f; d.shortestPaths(gauge, &f); //cerr << PVAR(d.visited().size()) << endl; if (d.visited().size()!=optimizer.vertices().size()) { cerr << CL_RED("Warning: d.visited().size() != optimizer.vertices().size()") << endl; cerr << "visited: " << d.visited().size() << endl; cerr << "vertices: " << optimizer.vertices().size() << endl; if (1) for (SparseOptimizer::VertexIDMap::const_iterator it = optimizer.vertices().begin(); it != optimizer.vertices().end(); ++it) { OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second); if (d.visited().count(v) == 0) { cerr << "\t unvisited vertex " << it->first << " " << (void*)v << endl; v->setFixed(true); } } } for (SparseOptimizer::VertexIDMap::const_iterator it = optimizer.vertices().begin(); it != optimizer.vertices().end(); ++it) { OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second); if (v->fixed()) { cerr << "\t fixed vertex " << it->first << endl; } } VertexSE2* laserOffset = dynamic_cast<VertexSE2*>(optimizer.vertex(Gm2dlIO::ID_LASERPOSE)); VertexOdomDifferentialParams* odomParamsVertex = dynamic_cast<VertexOdomDifferentialParams*>(optimizer.vertex(Gm2dlIO::ID_ODOMCALIB)); if (fixLaser) { cerr << "Fix position of the laser offset" << endl; laserOffset->setFixed(true); } signal(SIGINT, sigquit_handler); cerr << "Doing full estimation" << endl; optimizer.initializeOptimization(); optimizer.computeActiveErrors(); cerr << "Initial chi2 = " << FIXED(optimizer.chi2()) 
<< endl; int i=optimizer.optimize(maxIterations); if (maxIterations > 0 && !i){ cerr << "optimize failed, result might be invalid" << endl; } if (laserOffset) { cerr << "Calibrated laser offset (x, y, theta):" << laserOffset->estimate().toVector().transpose() << endl; } if (odomParamsVertex) { cerr << "Odometry parameters (scaling factors (v_l, v_r, b)): " << odomParamsVertex->estimate().transpose() << endl; } cerr << "vertices: " << optimizer.vertices().size() << endl; cerr << "edges: " << optimizer.edges().size() << endl; if (dumpGraphFilename.size() > 0) { cerr << "Writing " << dumpGraphFilename << " ... "; optimizer.save(dumpGraphFilename.c_str()); cerr << "done." << endl; } // optional input of a seperate file for applying the odometry calibration if (odomTestFilename.size() > 0) { DataQueue testRobotLaserQueue; int numTestOdom = Gm2dlIO::readRobotLaser(odomTestFilename, testRobotLaserQueue); if (numTestOdom == 0) { cerr << "Unable to read test data" << endl; } else { ofstream rawStream("odometry_raw.txt"); ofstream calibratedStream("odometry_calibrated.txt"); const Vector3d& odomCalib = odomParamsVertex->estimate(); RobotLaser* prev = dynamic_cast<RobotLaser*>(testRobotLaserQueue.buffer().begin()->second); SE2 prevCalibratedPose = prev->odomPose(); for (DataQueue::Buffer::const_iterator it = testRobotLaserQueue.buffer().begin(); it != testRobotLaserQueue.buffer().end(); ++it) { RobotLaser* cur = dynamic_cast<RobotLaser*>(it->second); assert(cur); double dt = cur->timestamp() - prev->timestamp(); SE2 motion = prev->odomPose().inverse() * cur->odomPose(); // convert to velocity measurment MotionMeasurement motionMeasurement(motion.translation().x(), motion.translation().y(), motion.rotation().angle(), dt); VelocityMeasurement velocityMeasurement = OdomConvert::convertToVelocity(motionMeasurement); // apply calibration VelocityMeasurement calibratedVelocityMeasurment = velocityMeasurement; calibratedVelocityMeasurment.setVl(odomCalib(0) * calibratedVelocityMeasurment.vl()); calibratedVelocityMeasurment.setVr(odomCalib(1) * calibratedVelocityMeasurment.vr()); MotionMeasurement mm = OdomConvert::convertToMotion(calibratedVelocityMeasurment, odomCalib(2)); // combine calibrated odometry with the previous pose SE2 remappedOdom; remappedOdom.fromVector(mm.measurement()); SE2 calOdomPose = prevCalibratedPose * remappedOdom; // write output rawStream << prev->odomPose().translation().x() << " " << prev->odomPose().translation().y() << " " << prev->odomPose().rotation().angle() << endl; calibratedStream << calOdomPose.translation().x() << " " << calOdomPose.translation().y() << " " << calOdomPose.rotation().angle() << endl; prevCalibratedPose = calOdomPose; prev = cur; } } } if (outputfilename.size() > 0) { Gm2dlIO::updateLaserData(optimizer); cerr << "Writing " << outputfilename << " ... "; bool writeStatus = Gm2dlIO::writeGm2dl(outputfilename, optimizer); cerr << (writeStatus ? "done." : "failed") << endl; } return 0; }
void Optimizer::optimizeUseG2O() { // create the linear solver BlockSolverX::LinearSolverType * linearSolver = new LinearSolverCSparse<BlockSolverX::PoseMatrixType>(); // create the block solver on top of the linear solver BlockSolverX* blockSolver = new BlockSolverX(linearSolver); // create the algorithm to carry out the optimization //OptimizationAlgorithmGaussNewton* optimizationAlgorithm = new OptimizationAlgorithmGaussNewton(blockSolver); OptimizationAlgorithmLevenberg* optimizationAlgorithm = new OptimizationAlgorithmLevenberg(blockSolver); // NOTE: We skip to fix a variable here, either this is stored in the file // itself or Levenberg will handle it. // create the optimizer to load the data and carry out the optimization SparseOptimizer optimizer; SparseOptimizer::initMultiThreading(); optimizer.setVerbose(true); optimizer.setAlgorithm(optimizationAlgorithm); { pcl::ScopeTime time("G2O setup Graph vertices"); for (size_t cloud_count = 0; cloud_count < m_pointClouds.size(); ++cloud_count) { VertexSE3 *vertex = new VertexSE3; vertex->setId(cloud_count); Isometry3D affine = Isometry3D::Identity(); affine.linear() = m_pointClouds[cloud_count]->sensor_orientation_.toRotationMatrix().cast<Isometry3D::Scalar>(); affine.translation() = m_pointClouds[cloud_count]->sensor_origin_.block<3, 1>(0, 0).cast<Isometry3D::Scalar>(); vertex->setEstimate(affine); optimizer.addVertex(vertex); } optimizer.vertex(0)->setFixed(true); } { pcl::ScopeTime time("G2O setup Graph edges"); double trans_noise = 0.5, rot_noise = 0.5235; EdgeSE3::InformationType infomation = EdgeSE3::InformationType::Zero(); infomation.block<3, 3>(0, 0) << trans_noise * trans_noise, 0, 0, 0, trans_noise * trans_noise, 0, 0, 0, trans_noise * trans_noise; infomation.block<3, 3>(3, 3) << rot_noise * rot_noise, 0, 0, 0, rot_noise * rot_noise, 0, 0, 0, rot_noise * rot_noise; for (size_t pair_count = 0; pair_count < m_cloudPairs.size(); ++pair_count) { CloudPair pair = m_cloudPairs[pair_count]; int from = pair.corresIdx.first; int to = pair.corresIdx.second; EdgeSE3 *edge = new EdgeSE3; edge->vertices()[0] = optimizer.vertex(from); edge->vertices()[1] = optimizer.vertex(to); Eigen::Matrix<double, 6, 6> ATA = Eigen::Matrix<double, 6, 6>::Zero(); Eigen::Matrix<double, 6, 1> ATb = Eigen::Matrix<double, 6, 1>::Zero(); #pragma unroll 8 for (size_t point_count = 0; point_count < pair.corresPointIdx.size(); ++point_count) { int point_p = pair.corresPointIdx[point_count].first; int point_q = pair.corresPointIdx[point_count].second; PointType P = m_pointClouds[from]->points[point_p]; PointType Q = m_pointClouds[to]->points[point_q]; Eigen::Vector3d p = P.getVector3fMap().cast<double>(); Eigen::Vector3d q = Q.getVector3fMap().cast<double>(); Eigen::Vector3d Np = P.getNormalVector3fMap().cast<double>(); double b = (p - q).dot(Np); Eigen::Matrix<double, 6, 1> A_p; A_p.block<3, 1>(0, 0) = p.cross(Np); A_p.block<3, 1>(3, 0) = Np; ATA += A_p * A_p.transpose(); ATb += A_p * b; } Eigen::Matrix<double, 6, 1> X = ATA.ldlt().solve(ATb); Isometry3D measure = Isometry3D::Identity(); float beta = X[0]; float gammar = X[1]; float alpha = X[2]; measure.linear() = (Eigen::Matrix3d)Eigen::AngleAxisd(alpha, Eigen::Vector3d::UnitZ()) * Eigen::AngleAxisd(gammar, Eigen::Vector3d::UnitY()) * Eigen::AngleAxisd(beta, Eigen::Vector3d::UnitX()); measure.translation() = X.block<3, 1>(3, 0); edge->setMeasurement(measure); edge->setInformation(infomation); optimizer.addEdge(edge); } } optimizer.save("debug_preOpt.g2o"); { pcl::ScopeTime time("g2o optimizing"); 
optimizer.initializeOptimization(); optimizer.optimize(30); } optimizer.save("debug_postOpt.g2o"); for (size_t cloud_count = 0; cloud_count < m_pointClouds.size(); ++cloud_count) { CloudTypePtr tmp(new CloudType); Isometry3D trans = ((VertexSE3 *)optimizer.vertices()[cloud_count])->estimate(); Eigen::Affine3d affine; affine.linear() = trans.rotation(); affine.translation() = trans.translation(); pcl::transformPointCloudWithNormals(*m_pointClouds[cloud_count], *tmp, affine.cast<float>()); pcl::copyPointCloud(*tmp, *m_pointClouds[cloud_count]); } PCL_WARN("Opitimization DONE!!!!\n"); if (m_params.saveDirectory.length()) { if (boost::filesystem::exists(m_params.saveDirectory) && !boost::filesystem::is_directory(m_params.saveDirectory)) { boost::filesystem::remove(m_params.saveDirectory); } boost::filesystem::create_directories(m_params.saveDirectory); char filename[1024] = { 0 }; for (size_t i = 0; i < m_pointClouds.size(); ++i) { sprintf(filename, "%s/cloud_%d.ply", m_params.saveDirectory.c_str(), i); pcl::io::savePLYFileBinary(filename, *m_pointClouds[i]); } } }
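For reference, the per-pair loop in optimizeUseG2O above builds a point-to-plane ICP linearization: for every correspondence (p, q) with normal n_p it accumulates

$$A_i = \begin{pmatrix} p_i \times n_{p_i} \\ n_{p_i} \end{pmatrix}, \qquad b_i = (p_i - q_i)\cdot n_{p_i},$$

and then solves the 6x6 normal equations

$$\Big(\sum_i A_i A_i^\top\Big)\, x = \sum_i A_i\, b_i, \qquad x = (\beta, \gamma, \alpha, t_x, t_y, t_z)^\top,$$

where beta, gamma, alpha are small rotation angles about the X, Y and Z axes and t is the translation. The code then assembles the Isometry3D measurement of the EdgeSE3 as Rz(alpha)*Ry(gamma)*Rx(beta) with translation t.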
int main() { double euc_noise = 0.01; // noise in position, m // double outlier_ratio = 0.1; SparseOptimizer optimizer; optimizer.setVerbose(false); // variable-size block solver BlockSolverX::LinearSolverType * linearSolver = new LinearSolverDense<g2o::BlockSolverX::PoseMatrixType>(); BlockSolverX * solver_ptr = new BlockSolverX(linearSolver); g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr); optimizer.setAlgorithm(solver); vector<Vector3d> true_points; for (size_t i=0;i<1000; ++i) { true_points.push_back(Vector3d((Sample::uniform()-0.5)*3, Sample::uniform()-0.5, Sample::uniform()+10)); } // set up two poses int vertex_id = 0; for (size_t i=0; i<2; ++i) { // set up rotation and translation for this node Vector3d t(0,0,i); Quaterniond q; q.setIdentity(); Eigen::Isometry3d cam; // camera pose cam = q; cam.translation() = t; // set up node VertexSE3 *vc = new VertexSE3(); vc->setEstimate(cam); vc->setId(vertex_id); // vertex id cerr << t.transpose() << " | " << q.coeffs().transpose() << endl; // set first cam pose fixed if (i==0) vc->setFixed(true); // add to optimizer optimizer.addVertex(vc); vertex_id++; } // set up point matches for (size_t i=0; i<true_points.size(); ++i) { // get two poses VertexSE3* vp0 = dynamic_cast<VertexSE3*>(optimizer.vertices().find(0)->second); VertexSE3* vp1 = dynamic_cast<VertexSE3*>(optimizer.vertices().find(1)->second); // calculate the relative 3D position of the point Vector3d pt0,pt1; pt0 = vp0->estimate().inverse() * true_points[i]; pt1 = vp1->estimate().inverse() * true_points[i]; // add in noise pt0 += Vector3d(Sample::gaussian(euc_noise ), Sample::gaussian(euc_noise ), Sample::gaussian(euc_noise )); pt1 += Vector3d(Sample::gaussian(euc_noise ), Sample::gaussian(euc_noise ), Sample::gaussian(euc_noise )); // form edge, with normals in varioius positions Vector3d nm0, nm1; nm0 << 0, i, 1; nm1 << 0, i, 1; nm0.normalize(); nm1.normalize(); Edge_V_V_GICP * e // new edge with correct cohort for caching = new Edge_V_V_GICP(); e->setVertex(0, vp0); // first viewpoint e->setVertex(1, vp1); // second viewpoint EdgeGICP meas; meas.pos0 = pt0; meas.pos1 = pt1; meas.normal0 = nm0; meas.normal1 = nm1; e->setMeasurement(meas); // e->inverseMeasurement().pos() = -kp; meas = e->measurement(); // use this for point-plane e->information() = meas.prec0(0.01); // use this for point-point // e->information().setIdentity(); // e->setRobustKernel(true); //e->setHuberWidth(0.01); optimizer.addEdge(e); } // move second cam off of its true position VertexSE3* vc = dynamic_cast<VertexSE3*>(optimizer.vertices().find(1)->second); Eigen::Isometry3d cam = vc->estimate(); cam.translation() = Vector3d(0,0,0.2); vc->setEstimate(cam); optimizer.initializeOptimization(); optimizer.computeActiveErrors(); cout << "Initial chi2 = " << FIXED(optimizer.chi2()) << endl; optimizer.setVerbose(true); optimizer.optimize(5); cout << endl << "Second vertex should be near 0,0,1" << endl; cout << dynamic_cast<VertexSE3*>(optimizer.vertices().find(0)->second) ->estimate().translation().transpose() << endl; cout << dynamic_cast<VertexSE3*>(optimizer.vertices().find(1)->second) ->estimate().translation().transpose() << endl; }
int main(int argc, char** argv) { bool fixLaser; int maxIterations; bool verbose; string inputFilename; string outputfilename; string rawFilename; string odomTestFilename; string dumpGraphFilename; // command line parsing CommandArgs commandLineArguments; commandLineArguments.param("i", maxIterations, 10, "perform n iterations"); commandLineArguments.param("v", verbose, false, "verbose output of the optimization process"); commandLineArguments.param("o", outputfilename, "", "output final version of the graph"); commandLineArguments.param("test", odomTestFilename, "", "apply odometry calibration to some test data"); commandLineArguments.param("dump", dumpGraphFilename, "", "write the graph to the disk"); commandLineArguments.param("fixLaser", fixLaser, false, "keep the laser offset fixed during optimization"); commandLineArguments.paramLeftOver("gm2dl-input", inputFilename, "", "gm2dl file which will be processed"); commandLineArguments.paramLeftOver("raw-log", rawFilename, "", "raw log file containing the odometry"); commandLineArguments.parseArgs(argc, argv); SparseOptimizer optimizer; optimizer.setVerbose(verbose); allocateSolverForSclam(optimizer); // loading DataQueue odometryQueue; int numLaserOdom = Gm2dlIO::readRobotLaser(rawFilename, odometryQueue); if (numLaserOdom == 0) { cerr << "No raw information read" << endl; return 0; } cerr << "Read " << numLaserOdom << " laser readings from file" << endl; Eigen::Vector3d odomCalib(1., 1., 1.); SE2 initialLaserPose; DataQueue robotLaserQueue; int numRobotLaser = Gm2dlIO::readRobotLaser(inputFilename, robotLaserQueue); if (numRobotLaser == 0) { cerr << "No robot laser read" << endl; return 0; } else { RobotLaser* rl = dynamic_cast<RobotLaser*>(robotLaserQueue.buffer().begin()->second); initialLaserPose = rl->odomPose().inverse() * rl->laserPose(); cerr << PVAR(initialLaserPose.toVector().transpose()) << endl; } // adding the measurements vector<MotionInformation, Eigen::aligned_allocator<MotionInformation> > motions; { std::map<double, RobotData*>::const_iterator it = robotLaserQueue.buffer().begin(); std::map<double, RobotData*>::const_iterator prevIt = it++; for (; it != robotLaserQueue.buffer().end(); ++it) { MotionInformation mi; RobotLaser* prevLaser = dynamic_cast<RobotLaser*>(prevIt->second); RobotLaser* curLaser = dynamic_cast<RobotLaser*>(it->second); mi.laserMotion = prevLaser->laserPose().inverse() * curLaser->laserPose(); // get the motion of the robot in that time interval RobotLaser* prevOdom = dynamic_cast<RobotLaser*>(odometryQueue.findClosestData(prevLaser->timestamp())); RobotLaser* curOdom = dynamic_cast<RobotLaser*>(odometryQueue.findClosestData(curLaser->timestamp())); mi.odomMotion = prevOdom->odomPose().inverse() * curOdom->odomPose(); mi.timeInterval = prevOdom->timestamp() - curOdom->timestamp(); prevIt = it; motions.push_back(mi); } } if (1) { VertexSE2* laserOffset = new VertexSE2; laserOffset->setId(Gm2dlIO::ID_LASERPOSE); laserOffset->setEstimate(initialLaserPose); optimizer.addVertex(laserOffset); VertexOdomDifferentialParams* odomParamsVertex = new VertexOdomDifferentialParams; odomParamsVertex->setId(Gm2dlIO::ID_ODOMCALIB); odomParamsVertex->setEstimate(Eigen::Vector3d(1., 1., 1.)); optimizer.addVertex(odomParamsVertex); for (size_t i = 0; i < motions.size(); ++i) { const SE2& odomMotion = motions[i].odomMotion; const SE2& laserMotion = motions[i].laserMotion; const double& timeInterval = motions[i].timeInterval; // add the edge MotionMeasurement mm(odomMotion.translation().x(), odomMotion.translation().y(), 
odomMotion.rotation().angle(), timeInterval); OdomAndLaserMotion meas; meas.velocityMeasurement = OdomConvert::convertToVelocity(mm); meas.laserMotion = laserMotion; EdgeSE2PureCalib* calibEdge = new EdgeSE2PureCalib; calibEdge->setVertex(0, laserOffset); calibEdge->setVertex(1, odomParamsVertex); calibEdge->setInformation(Eigen::Matrix3d::Identity()); calibEdge->setMeasurement(meas); if (! optimizer.addEdge(calibEdge)) { cerr << "Error adding calib edge" << endl; delete calibEdge; } } if (fixLaser) { cerr << "Fix position of the laser offset" << endl; laserOffset->setFixed(true); } cerr << "\nPerforming full non-linear estimation" << endl; optimizer.initializeOptimization(); optimizer.computeActiveErrors(); optimizer.optimize(maxIterations); cerr << "Calibrated laser offset (x, y, theta):" << laserOffset->estimate().toVector().transpose() << endl; odomCalib = odomParamsVertex->estimate(); cerr << "Odometry parameters (scaling factors (v_l, v_r, b)): " << odomParamsVertex->estimate().transpose() << endl; optimizer.clear(); } // linear least squares for some parameters { Eigen::MatrixXd A(motions.size(), 2); Eigen::VectorXd x(motions.size()); for (size_t i = 0; i < motions.size(); ++i) { const SE2& odomMotion = motions[i].odomMotion; const SE2& laserMotion = motions[i].laserMotion; const double& timeInterval = motions[i].timeInterval; MotionMeasurement mm(odomMotion.translation().x(), odomMotion.translation().y(), odomMotion.rotation().angle(), timeInterval); VelocityMeasurement velMeas = OdomConvert::convertToVelocity(mm); A(i, 0) = velMeas.vl() * timeInterval; A(i, 1) = velMeas.vr() * timeInterval; x(i) = laserMotion.rotation().angle(); } //linearSolution = (A.transpose() * A).inverse() * A.transpose() * x; linearSolution = A.colPivHouseholderQr().solve(x); //cout << PVAR(linearSolution.transpose()) << endl; } //constructing non-linear least squares VertexSE2* laserOffset = new VertexSE2; laserOffset->setId(Gm2dlIO::ID_LASERPOSE); laserOffset->setEstimate(initialLaserPose); optimizer.addVertex(laserOffset); VertexBaseline* odomParamsVertex = new VertexBaseline; odomParamsVertex->setId(Gm2dlIO::ID_ODOMCALIB); odomParamsVertex->setEstimate(1.); optimizer.addVertex(odomParamsVertex); for (size_t i = 0; i < motions.size(); ++i) { const SE2& odomMotion = motions[i].odomMotion; const SE2& laserMotion = motions[i].laserMotion; const double& timeInterval = motions[i].timeInterval; // add the edge MotionMeasurement mm(odomMotion.translation().x(), odomMotion.translation().y(), odomMotion.rotation().angle(), timeInterval); OdomAndLaserMotion meas; meas.velocityMeasurement = OdomConvert::convertToVelocity(mm); meas.laserMotion = laserMotion; EdgeCalib* calibEdge = new EdgeCalib; calibEdge->setVertex(0, laserOffset); calibEdge->setVertex(1, odomParamsVertex); calibEdge->setInformation(Eigen::Matrix3d::Identity()); calibEdge->setMeasurement(meas); if (! optimizer.addEdge(calibEdge)) { cerr << "Error adding calib edge" << endl; delete calibEdge; } } if (fixLaser) { cerr << "Fix position of the laser offset" << endl; laserOffset->setFixed(true); } cerr << "\nPerforming partial non-linear estimation" << endl; optimizer.initializeOptimization(); optimizer.computeActiveErrors(); optimizer.optimize(maxIterations); cerr << "Calibrated laser offset (x, y, theta):" << laserOffset->estimate().toVector().transpose() << endl; odomCalib(0) = -1. 
* linearSolution(0) * odomParamsVertex->estimate(); odomCalib(1) = linearSolution(1) * odomParamsVertex->estimate(); odomCalib(2) = odomParamsVertex->estimate(); cerr << "Odometry parameters (scaling factors (v_l, v_r, b)): " << odomCalib.transpose() << endl; { SE2 closedFormLaser; Eigen::Vector3d closedFormOdom; ClosedFormCalibration::calibrate(motions, closedFormLaser, closedFormOdom); cerr << "\nObtaining closed form solution" << endl; cerr << "Calibrated laser offset (x, y, theta):" << closedFormLaser.toVector().transpose() << endl; cerr << "Odometry parameters (scaling factors (v_l, v_r, b)): " << closedFormOdom.transpose() << endl; } if (dumpGraphFilename.size() > 0) { cerr << "Writing " << dumpGraphFilename << " ... "; optimizer.save(dumpGraphFilename.c_str()); cerr << "done." << endl; } // optional input of a separate file for applying the odometry calibration if (odomTestFilename.size() > 0) { DataQueue testRobotLaserQueue; int numTestOdom = Gm2dlIO::readRobotLaser(odomTestFilename, testRobotLaserQueue); if (numTestOdom == 0) { cerr << "Unable to read test data" << endl; } else { ofstream rawStream("odometry_raw.txt"); ofstream calibratedStream("odometry_calibrated.txt"); RobotLaser* prev = dynamic_cast<RobotLaser*>(testRobotLaserQueue.buffer().begin()->second); SE2 prevCalibratedPose = prev->odomPose(); for (DataQueue::Buffer::const_iterator it = testRobotLaserQueue.buffer().begin(); it != testRobotLaserQueue.buffer().end(); ++it) { RobotLaser* cur = dynamic_cast<RobotLaser*>(it->second); assert(cur); double dt = cur->timestamp() - prev->timestamp(); SE2 motion = prev->odomPose().inverse() * cur->odomPose(); // convert to velocity measurement MotionMeasurement motionMeasurement(motion.translation().x(), motion.translation().y(), motion.rotation().angle(), dt); VelocityMeasurement velocityMeasurement = OdomConvert::convertToVelocity(motionMeasurement); // apply calibration VelocityMeasurement calibratedVelocityMeasurment = velocityMeasurement; calibratedVelocityMeasurment.setVl(odomCalib(0) * calibratedVelocityMeasurment.vl()); calibratedVelocityMeasurment.setVr(odomCalib(1) * calibratedVelocityMeasurment.vr()); MotionMeasurement mm = OdomConvert::convertToMotion(calibratedVelocityMeasurment, odomCalib(2)); // combine calibrated odometry with the previous pose SE2 remappedOdom; remappedOdom.fromVector(mm.measurement()); SE2 calOdomPose = prevCalibratedPose * remappedOdom; // write output rawStream << prev->odomPose().translation().x() << " " << prev->odomPose().translation().y() << " " << prev->odomPose().rotation().angle() << endl; calibratedStream << calOdomPose.translation().x() << " " << calOdomPose.translation().y() << " " << calOdomPose.rotation().angle() << endl; prevCalibratedPose = calOdomPose; prev = cur; } } } return 0; }
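A note on the linear least-squares step in the calibration above: each motion contributes one equation relating the observed laser rotation to the wheel velocities,

$$\theta^{\text{laser}}_i \approx k_l\,(v_{l,i}\,\Delta t_i) + k_r\,(v_{r,i}\,\Delta t_i),$$

and under the usual differential-drive model, where the calibrated rotation is $(c_r v_r - c_l v_l)\,\Delta t / b$, the fitted coefficients are $k_l = -c_l/b$ and $k_r = c_r/b$. This is why the final parameters are recovered as $c_l = -k_l\,\hat b$, $c_r = k_r\,\hat b$ and $b = \hat b$, with $\hat b$ the baseline estimated by the partial non-linear optimization (VertexBaseline).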
int main(int argc, char **argv) { /************************************************************************ * Input handling * ************************************************************************/ float rows, cols, gain, square_size; float resolution, max_range, usable_range, angle, threshold; string g2oFilename, mapFilename; g2o::CommandArgs arg; arg.param("resolution", resolution, 0.05f, "resolution of the map (how much is in meters a pixel)"); arg.param("threshold", threshold, -1.0f, "threshold to apply to the frequency map (values under the threshold are discarded)"); arg.param("rows", rows, 0, "impose the resulting map to have this number of rows"); arg.param("cols", cols, 0, "impose the resulting map to have this number of columns"); arg.param("max_range", max_range, -1.0f, "max laser range to consider for map building"); arg.param("usable_range", usable_range, -1.0f, "usable laser range for map building"); arg.param("gain", gain, 1, "gain to impose to the pixels of the map"); arg.param("square_size", square_size, 1, "square size of the region where increment the hits"); arg.param("angle", angle, 0, "rotate the map of x degrees"); arg.paramLeftOver("input_graph.g2o", g2oFilename, "", "input g2o graph to use to build the map", false); arg.paramLeftOver("output_map", mapFilename, "", "output filename where to save the map (without extension)", false); arg.parseArgs(argc, argv); angle = angle*M_PI/180.0; /************************************************************************ * Loading Graph * ************************************************************************/ // Load graph typedef BlockSolver< BlockSolverTraits<-1, -1> > SlamBlockSolver; typedef LinearSolverCSparse<SlamBlockSolver::PoseMatrixType> SlamLinearSolver; SlamLinearSolver *linearSolver = new SlamLinearSolver(); linearSolver->setBlockOrdering(false); SlamBlockSolver *blockSolver = new SlamBlockSolver(linearSolver); OptimizationAlgorithmGaussNewton *solverGauss = new OptimizationAlgorithmGaussNewton(blockSolver); SparseOptimizer *graph = new SparseOptimizer(); graph->setAlgorithm(solverGauss); graph->load(g2oFilename.c_str()); // Sort verteces vector<int> vertexIds(graph->vertices().size()); int k = 0; for(OptimizableGraph::VertexIDMap::iterator it = graph->vertices().begin(); it != graph->vertices().end(); ++it) { vertexIds[k++] = (it->first); } sort(vertexIds.begin(), vertexIds.end()); /************************************************************************ * Compute map size * ************************************************************************/ // Check the entire graph to find map bounding box Eigen::Matrix2d boundingBox = Eigen::Matrix2d::Zero(); std::vector<RobotLaser*> robotLasers; std::vector<SE2> robotPoses; double xmin=std::numeric_limits<double>::max(); double xmax=std::numeric_limits<double>::min(); double ymin=std::numeric_limits<double>::max(); double ymax=std::numeric_limits<double>::min(); SE2 baseTransform(0,0,angle); for(size_t i = 0; i < vertexIds.size(); ++i) { OptimizableGraph::Vertex *_v = graph->vertex(vertexIds[i]); VertexSE2 *v = dynamic_cast<VertexSE2*>(_v); if(!v) { continue; } v->setEstimate(baseTransform*v->estimate()); OptimizableGraph::Data *d = v->userData(); while(d) { RobotLaser *robotLaser = dynamic_cast<RobotLaser*>(d); if(!robotLaser) { d = d->next(); continue; } robotLasers.push_back(robotLaser); robotPoses.push_back(v->estimate()); double x = v->estimate().translation().x(); double y = v->estimate().translation().y(); xmax = xmax > x+usable_range ? 
xmax : x+usable_range; ymax = ymax > y+usable_range ? ymax : y+usable_range; xmin = xmin < x-usable_range ? xmin : x-usable_range; ymin = ymin < y-usable_range ? ymin : y-usable_range; d = d->next(); } } boundingBox(0,0)=xmin; boundingBox(0,1)=xmax; boundingBox(1,0)=ymin; boundingBox(1,1)=ymax; std::cout << "Found " << robotLasers.size() << " laser scans"<< std::endl; std::cout << "Bounding box: " << std::endl << boundingBox << std::endl; if(robotLasers.size() == 0) { std::cout << "No laser scans found ... quitting!" << std::endl; return 0; } /************************************************************************ * Compute the map * ************************************************************************/ // Create the map Eigen::Vector2i size; if(rows != 0 && cols != 0) { size = Eigen::Vector2i(rows, cols); } else { size = Eigen::Vector2i((boundingBox(0, 1) - boundingBox(0, 0))/ resolution, (boundingBox(1, 1) - boundingBox(1, 0))/ resolution); } std::cout << "Map size: " << size.transpose() << std::endl; if(size.x() == 0 || size.y() == 0) { std::cout << "Zero map size ... quitting!" << std::endl; return 0; } //Eigen::Vector2f offset(-size.x() * resolution / 2.0f, -size.y() * resolution / 2.0f); Eigen::Vector2f offset(boundingBox(0, 0),boundingBox(1, 0)); FrequencyMapCell unknownCell; FrequencyMap map = FrequencyMap(resolution, offset, size, unknownCell); for(size_t i = 0; i < vertexIds.size(); ++i) { OptimizableGraph::Vertex *_v = graph->vertex(vertexIds[i]); VertexSE2 *v = dynamic_cast<VertexSE2*>(_v); if(!v) { continue; } OptimizableGraph::Data *d = v->userData(); SE2 robotPose = v->estimate(); while(d) { RobotLaser *robotLaser = dynamic_cast<RobotLaser*>(d); if(!robotLaser) { d = d->next(); continue; } map.integrateScan(robotLaser, robotPose, max_range, usable_range, gain, square_size); d = d->next(); } } /************************************************************************ * Save map image * ************************************************************************/ cv::Mat mapImage(map.rows(), map.cols(), CV_8UC1); mapImage.setTo(cv::Scalar(0)); for(int c = 0; c < map.cols(); c++) { for(int r = 0; r < map.rows(); r++) { if(map(r, c).misses() == 0 && map(r, c).hits() == 0) { mapImage.at<unsigned char>(r, c) = 127; } else { float fraction = (float)map(r, c).hits()/(float)(map(r, c).hits()+map(r, c).misses()); if (threshold > 0 && fraction > threshold) mapImage.at<unsigned char>(r, c) = 0; else if (threshold > 0 && fraction <= threshold) mapImage.at<unsigned char>(r, c) = 255; else { float val = 255*(1-fraction); mapImage.at<unsigned char>(r, c) = (unsigned char)val; } } // else if(map(r, c).hits() > threshold) { // mapImage.at<unsigned char>(r, c) = 255; // } // else { // mapImage.at<unsigned char>(r, c) = 0; // } } } cv::imwrite(mapFilename + ".png", mapImage); /************************************************************************ * Write yaml file * ************************************************************************/ std::ofstream ofs(string(mapFilename + ".yaml").c_str()); Eigen::Vector3f origin(0.0f, 0.0f, 0.0f); ofs << "image: " << mapFilename << ".png" << std::endl << "resolution: " << resolution << std::endl << "origin: [" << origin.x() << ", " << origin.y() << ", " << origin.z() << "]" << std::endl << "negate: 0" << std::endl << "occupied_thresh: " << 0.65f << std::endl << "free_thresh: " << 0.2f << std::endl; return 0; }
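The grayscale written to the map image above follows directly from the hit/miss counters: cells with no hits and no misses are drawn as 127 (unknown); otherwise the occupancy fraction

$$f = \frac{\text{hits}}{\text{hits} + \text{misses}}$$

is either thresholded to 0 (occupied) or 255 (free) when a threshold is given, or mapped continuously to the pixel value $255\,(1 - f)$.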
int main(int argc, char **argv) { int num_points = 0; // check for arg, # of points to use in projection SBA if (argc > 1) num_points = atoi(argv[1]); double euc_noise = 0.1; // noise in position, m double pix_noise = 1.0; // pixel noise // double outlier_ratio = 0.1; SparseOptimizer optimizer; optimizer.setVerbose(false); // variable-size block solver BlockSolverX::LinearSolverType * linearSolver = new LinearSolverCSparse<g2o ::BlockSolverX::PoseMatrixType>(); BlockSolverX * solver_ptr = new BlockSolverX(linearSolver); g2o::OptimizationAlgorithmLevenberg* solver = new g2o::OptimizationAlgorithmLevenberg(solver_ptr); optimizer.setAlgorithm(solver); vector<Vector3d> true_points; for (size_t i=0;i<1000; ++i) { true_points.push_back(Vector3d((Sample::uniform()-0.5)*3, Sample::uniform()-0.5, Sample::uniform()+10)); } // set up camera params Vector2d focal_length(500,500); // pixels Vector2d principal_point(320,240); // 640x480 image double baseline = 0.075; // 7.5 cm baseline // set up camera params and projection matrices on vertices g2o::VertexSCam::setKcam(focal_length[0],focal_length[1], principal_point[0],principal_point[1], baseline); // set up two poses int vertex_id = 0; for (size_t i=0; i<2; ++i) { // set up rotation and translation for this node Vector3d t(0,0,i); Quaterniond q; q.setIdentity(); Eigen::Isometry3d cam; // camera pose cam = q; cam.translation() = t; // set up node VertexSCam *vc = new VertexSCam(); vc->setEstimate(cam); vc->setId(vertex_id); // vertex id cerr << t.transpose() << " | " << q.coeffs().transpose() << endl; // set first cam pose fixed if (i==0) vc->setFixed(true); // make sure projection matrices are set vc->setAll(); // add to optimizer optimizer.addVertex(vc); vertex_id++; } // set up point matches for GICP for (size_t i=0; i<true_points.size(); ++i) { // get two poses VertexSE3* vp0 = dynamic_cast<VertexSE3*>(optimizer.vertices().find(0)->second); VertexSE3* vp1 = dynamic_cast<VertexSE3*>(optimizer.vertices().find(1)->second); // calculate the relative 3D position of the point Vector3d pt0,pt1; pt0 = vp0->estimate().inverse() * true_points[i]; pt1 = vp1->estimate().inverse() * true_points[i]; // add in noise pt0 += Vector3d(Sample::gaussian(euc_noise ), Sample::gaussian(euc_noise ), Sample::gaussian(euc_noise )); pt1 += Vector3d(Sample::gaussian(euc_noise ), Sample::gaussian(euc_noise ), Sample::gaussian(euc_noise )); // form edge, with normals in varioius positions Vector3d nm0, nm1; nm0 << 0, i, 1; nm1 << 0, i, 1; nm0.normalize(); nm1.normalize(); Edge_V_V_GICP * e // new edge with correct cohort for caching = new Edge_V_V_GICP(); e->vertices()[0] // first viewpoint = dynamic_cast<OptimizableGraph::Vertex*>(vp0); e->vertices()[1] // second viewpoint = dynamic_cast<OptimizableGraph::Vertex*>(vp1); EdgeGICP meas; meas.pos0 = pt0; meas.pos1 = pt1; meas.normal0 = nm0; meas.normal1 = nm1; e->setMeasurement(meas); meas = e->measurement(); // e->inverseMeasurement().pos() = -kp; // use this for point-plane e->information() = meas.prec0(0.01); // use this for point-point // e->information().setIdentity(); // e->setRobustKernel(true); //e->setHuberWidth(0.01); optimizer.addEdge(e); } // set up SBA projections with some number of points true_points.clear(); for (int i=0;i<num_points; ++i) { true_points.push_back(Vector3d((Sample::uniform()-0.5)*3, Sample::uniform()-0.5, Sample::uniform()+10)); } // add point projections to this vertex for (size_t i=0; i<true_points.size(); ++i) { g2o::VertexSBAPointXYZ * v_p = new g2o::VertexSBAPointXYZ(); 
v_p->setId(vertex_id++); v_p->setMarginalized(true); v_p->setEstimate(true_points.at(i) + Vector3d(Sample::gaussian(1), Sample::gaussian(1), Sample::gaussian(1))); optimizer.addVertex(v_p); for (size_t j=0; j<2; ++j) { Vector3d z; dynamic_cast<g2o::VertexSCam*> (optimizer.vertices().find(j)->second) ->mapPoint(z,true_points.at(i)); if (z[0]>=0 && z[1]>=0 && z[0]<640 && z[1]<480) { z += Vector3d(Sample::gaussian(pix_noise), Sample::gaussian(pix_noise), Sample::gaussian(pix_noise/16.0)); g2o::Edge_XYZ_VSC * e = new g2o::Edge_XYZ_VSC(); e->vertices()[0] = dynamic_cast<g2o::OptimizableGraph::Vertex*>(v_p); e->vertices()[1] = dynamic_cast<g2o::OptimizableGraph::Vertex*> (optimizer.vertices().find(j)->second); e->setMeasurement(z); //e->inverseMeasurement() = -z; e->information() = Matrix3d::Identity(); //e->setRobustKernel(false); //e->setHuberWidth(1); optimizer.addEdge(e); } } } // done with adding projection points // move second cam off of its true position VertexSE3* vc = dynamic_cast<VertexSE3*>(optimizer.vertices().find(1)->second); Eigen::Isometry3d cam = vc->estimate(); cam.translation() = Vector3d(-0.1,0.1,0.2); vc->setEstimate(cam); optimizer.initializeOptimization(); optimizer.computeActiveErrors(); cout << "Initial chi2 = " << FIXED(optimizer.chi2()) << endl; optimizer.setVerbose(true); optimizer.optimize(20); cout << endl << "Second vertex should be near 0,0,1" << endl; cout << dynamic_cast<VertexSE3*>(optimizer.vertices().find(0)->second) ->estimate().translation().transpose() << endl; cout << dynamic_cast<VertexSE3*>(optimizer.vertices().find(1)->second) ->estimate().translation().transpose() << endl; }
int main(int argc, char** argv) { OptimizableGraph::initMultiThreading(); int maxIterations; bool verbose; string inputFilename; string gnudump; string outputfilename; string solverProperties; string strSolver; string loadLookup; bool initialGuess; bool initialGuessOdometry; bool marginalize; bool listTypes; bool listSolvers; bool listRobustKernels; bool incremental; bool guiOut; int gaugeId; string robustKernel; bool computeMarginals; bool printSolverProperties; double huberWidth; double gain; int maxIterationsWithGain; //double lambdaInit; int updateGraphEachN = 10; string statsFile; string summaryFile; bool nonSequential; // command line parsing std::vector<int> gaugeList; CommandArgs arg; arg.param("i", maxIterations, 5, "perform n iterations, if negative consider the gain"); arg.param("gain", gain, 1e-6, "the gain used to stop optimization (default = 1e-6)"); arg.param("ig",maxIterationsWithGain, std::numeric_limits<int>::max(), "Maximum number of iterations with gain enabled (default: inf)"); arg.param("v", verbose, false, "verbose output of the optimization process"); arg.param("guess", initialGuess, false, "initial guess based on spanning tree"); arg.param("guessOdometry", initialGuessOdometry, false, "initial guess based on odometry"); arg.param("inc", incremental, false, "run incremetally"); arg.param("update", updateGraphEachN, 10, "updates after x odometry nodes"); arg.param("guiout", guiOut, false, "gui output while running incrementally"); arg.param("marginalize", marginalize, false, "on or off"); arg.param("printSolverProperties", printSolverProperties, false, "print the properties of the solver"); arg.param("solverProperties", solverProperties, "", "set the internal properties of a solver,\n\te.g., initialLambda=0.0001,maxTrialsAfterFailure=2"); arg.param("gnudump", gnudump, "", "dump to gnuplot data file"); arg.param("robustKernel", robustKernel, "", "use this robust error function"); arg.param("robustKernelWidth", huberWidth, -1., "width for the robust Kernel (only if robustKernel)"); arg.param("computeMarginals", computeMarginals, false, "computes the marginal covariances of something. 
FOR TESTING ONLY"); arg.param("gaugeId", gaugeId, -1, "force the gauge"); arg.param("o", outputfilename, "", "output final version of the graph"); arg.param("solver", strSolver, "gn_var", "specify which solver to use underneat\n\t {gn_var, lm_fix3_2, gn_fix6_3, lm_fix7_3}"); #ifndef G2O_DISABLE_DYNAMIC_LOADING_OF_LIBRARIES string dummy; arg.param("solverlib", dummy, "", "specify a solver library which will be loaded"); arg.param("typeslib", dummy, "", "specify a types library which will be loaded"); #endif arg.param("stats", statsFile, "", "specify a file for the statistics"); arg.param("listTypes", listTypes, false, "list the registered types"); arg.param("listRobustKernels", listRobustKernels, false, "list the registered robust kernels"); arg.param("listSolvers", listSolvers, false, "list the available solvers"); arg.param("renameTypes", loadLookup, "", "create a lookup for loading types into other types,\n\t TAG_IN_FILE=INTERNAL_TAG_FOR_TYPE,TAG2=INTERNAL2\n\t e.g., VERTEX_CAM=VERTEX_SE3:EXPMAP"); arg.param("gaugeList", gaugeList, std::vector<int>(), "set the list of gauges separated by commas without spaces \n e.g: 1,2,3,4,5 "); arg.param("summary", summaryFile, "", "append a summary of this optimization run to the summary file passed as argument"); arg.paramLeftOver("graph-input", inputFilename, "", "graph file which will be processed", true); arg.param("nonSequential", nonSequential, false, "apply the robust kernel only on loop closures and not odometries"); arg.parseArgs(argc, argv); if (verbose) { cout << "# Used Compiler: " << G2O_CXX_COMPILER << endl; } #ifndef G2O_DISABLE_DYNAMIC_LOADING_OF_LIBRARIES // registering all the types from the libraries DlWrapper dlTypesWrapper; loadStandardTypes(dlTypesWrapper, argc, argv); // register all the solvers DlWrapper dlSolverWrapper; loadStandardSolver(dlSolverWrapper, argc, argv); #else if (verbose) cout << "# linked version of g2o" << endl; #endif OptimizationAlgorithmFactory* solverFactory = OptimizationAlgorithmFactory::instance(); if (listSolvers) { solverFactory->listSolvers(cout); } if (listTypes) { Factory::instance()->printRegisteredTypes(cout, true); } if (listRobustKernels) { std::vector<std::string> kernels; RobustKernelFactory::instance()->fillKnownKernels(kernels); cout << "Robust Kernels:" << endl; for (size_t i = 0; i < kernels.size(); ++i) { cout << kernels[i] << endl; } } SparseOptimizer optimizer; optimizer.setVerbose(verbose); optimizer.setForceStopFlag(&hasToStop); SparseOptimizerTerminateAction* terminateAction = 0; if (maxIterations < 0) { cerr << "# setup termination criterion based on the gain of the iteration" << endl; maxIterations = maxIterationsWithGain; terminateAction = new SparseOptimizerTerminateAction; terminateAction->setGainThreshold(gain); terminateAction->setMaxIterations(maxIterationsWithGain); optimizer.addPostIterationAction(terminateAction); } // allocating the desired solver + testing whether the solver is okay OptimizationAlgorithmProperty solverProperty; optimizer.setAlgorithm(solverFactory->construct(strSolver, solverProperty)); if (! optimizer.solver()) { cerr << "Error allocating solver. Allocating \"" << strSolver << "\" failed!" << endl; return 0; } if (solverProperties.size() > 0) { bool updateStatus = optimizer.solver()->updatePropertiesFromString(solverProperties); if (! 
  // Loading the input data
  if (loadLookup.size() > 0) {
    optimizer.setRenamedTypesFromString(loadLookup);
  }
  if (inputFilename.size() == 0) {
    cerr << "No input data specified" << endl;
    return 0;
  } else if (inputFilename == "-") {
    cerr << "Read input from stdin" << endl;
    if (!optimizer.load(cin)) {
      cerr << "Error loading graph" << endl;
      return 2;
    }
  } else {
    cerr << "Read input from " << inputFilename << endl;
    ifstream ifs(inputFilename.c_str());
    if (!ifs) {
      cerr << "Failed to open file" << endl;
      return 1;
    }
    if (!optimizer.load(ifs)) {
      cerr << "Error loading graph" << endl;
      return 2;
    }
  }
  cerr << "Loaded " << optimizer.vertices().size() << " vertices" << endl;
  cerr << "Loaded " << optimizer.edges().size() << " edges" << endl;

  if (optimizer.vertices().size() == 0) {
    cerr << "Graph contains no vertices" << endl;
    return 1;
  }

  set<int> vertexDimensions = optimizer.dimensions();
  if (! optimizer.isSolverSuitable(solverProperty, vertexDimensions)) {
    cerr << "The selected solver is not suitable for optimizing the given graph" << endl;
    return 3;
  }
  assert(optimizer.solver());
  //optimizer.setMethod(str2method(strMethod));
  //optimizer.setUserLambdaInit(lambdaInit);

  // check for vertices to fix to remove DoF
  bool gaugeFreedom = optimizer.gaugeFreedom();
  OptimizableGraph::Vertex* gauge = 0;
  if (gaugeList.size()) {
    cerr << "Fixing gauges: ";
    for (size_t i = 0; i < gaugeList.size(); i++) {
      int id = gaugeList[i];
      OptimizableGraph::Vertex* v = optimizer.vertex(id);
      if (!v) {
        cerr << "fatal: vertex with id " << id << " from the gaugeList was not found in the graph. Aborting" << endl;
        return -1;
      } else {
        if (i == 0)
          gauge = v;
        cerr << v->id() << " ";
        v->setFixed(true);
      }
    }
    cerr << endl;
    gaugeFreedom = false;
  } else {
    gauge = optimizer.findGauge();
  }

  if (gaugeFreedom) {
    if (! gauge) {
      cerr << "# cannot find a vertex to fix in this graph" << endl;
      return 2;
    } else {
      cerr << "# graph is fixed by node " << gauge->id() << endl;
      gauge->setFixed(true);
    }
  } else {
    cerr << "# graph is fixed by priors or an already fixed vertex" << endl;
  }

  // if the solver performs a Schur complement, marginalize the landmark vertices
  if (marginalize || solverProperty.requiresMarginalize) {
    int maxDim = *vertexDimensions.rbegin();
    int minDim = *vertexDimensions.begin();
    if (maxDim != minDim) {
      cerr << "# Preparing Marginalization of the Landmarks ... ";
      for (HyperGraph::VertexIDMap::iterator it = optimizer.vertices().begin(); it != optimizer.vertices().end(); it++) {
        OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
        if (v->dimension() != maxDim) {
          v->setMarginalized(true);
        }
      }
      cerr << "done." << endl;
    }
  }
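  // Background for the block above (a sketch, independent of the concrete
  // solver implementation): vertices whose dimension differs from the largest
  // vertex dimension are treated as landmarks and flagged with
  // setMarginalized(true), so a Schur-complement based solver can eliminate
  // them first, solve the much smaller reduced system over the poses, and then
  // back-substitute the landmark estimates.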
"; if (creator) { if (nonSequential) { for (SparseOptimizer::EdgeSet::iterator it = optimizer.edges().begin(); it != optimizer.edges().end(); ++it) { SparseOptimizer::Edge* e = dynamic_cast<SparseOptimizer::Edge*>(*it); if (e->vertices().size() >= 2 && std::abs(e->vertex(0)->id() - e->vertex(1)->id()) != 1) { e->setRobustKernel(creator->construct()); if (huberWidth > 0) e->robustKernel()->setDelta(huberWidth); } } } else { for (SparseOptimizer::EdgeSet::iterator it = optimizer.edges().begin(); it != optimizer.edges().end(); ++it) { SparseOptimizer::Edge* e = dynamic_cast<SparseOptimizer::Edge*>(*it); e->setRobustKernel(creator->construct()); if (huberWidth > 0) e->robustKernel()->setDelta(huberWidth); } } cerr << "done." << endl; } else { cerr << "Unknown Robust Kernel: " << robustKernel << endl; } } // sanity check HyperDijkstra d(&optimizer); UniformCostFunction f; d.shortestPaths(gauge,&f); //cerr << PVAR(d.visited().size()) << endl; if (d.visited().size()!=optimizer.vertices().size()) { cerr << CL_RED("Warning: d.visited().size() != optimizer.vertices().size()") << endl; cerr << "visited: " << d.visited().size() << endl; cerr << "vertices: " << optimizer.vertices().size() << endl; } if (incremental) { cerr << CL_RED("# Note: this variant performs batch steps in each time step") << endl; cerr << CL_RED("# For a variant which updates the Cholesky factor use the binary g2o_incremental") << endl; int incIterations = maxIterations; if (! arg.parsedParam("i")) { cerr << "# Setting default number of iterations" << endl; incIterations = 1; } int updateDisplayEveryN = updateGraphEachN; int maxDim = 0; cerr << "# incremental settings" << endl; cerr << "#\t solve every " << updateGraphEachN << endl; cerr << "#\t iterations " << incIterations << endl; SparseOptimizer::VertexIDMap vertices = optimizer.vertices(); for (SparseOptimizer::VertexIDMap::const_iterator it = vertices.begin(); it != vertices.end(); ++it) { const SparseOptimizer::Vertex* v = static_cast<const SparseOptimizer::Vertex*>(it->second); maxDim = max(maxDim, v->dimension()); } vector<SparseOptimizer::Edge*> edges; for (SparseOptimizer::EdgeSet::iterator it = optimizer.edges().begin(); it != optimizer.edges().end(); ++it) { SparseOptimizer::Edge* e = dynamic_cast<SparseOptimizer::Edge*>(*it); edges.push_back(e); } optimizer.edges().clear(); optimizer.vertices().clear(); optimizer.setVerbose(false); // sort the edges in a way that inserting them makes sense sort(edges.begin(), edges.end(), IncrementalEdgesCompare()); double cumTime = 0.; int vertexCount=0; int lastOptimizedVertexCount = 0; int lastVisUpdateVertexCount = 0; bool freshlyOptimized=false; bool firstRound = true; HyperGraph::VertexSet verticesAdded; HyperGraph::EdgeSet edgesAdded; for (vector<SparseOptimizer::Edge*>::iterator it = edges.begin(); it != edges.end(); ++it) { SparseOptimizer::Edge* e = *it; int doInit = 0; SparseOptimizer::Vertex* v1 = optimizer.vertex(e->vertices()[0]->id()); SparseOptimizer::Vertex* v2 = optimizer.vertex(e->vertices()[1]->id()); if (! v1) { SparseOptimizer::Vertex* v = v1 = dynamic_cast<SparseOptimizer::Vertex*>(e->vertices()[0]); bool v1Added = optimizer.addVertex(v); //cerr << "adding" << v->id() << "(" << v->dimension() << ")" << endl; assert(v1Added); if (! v1Added) cerr << "Error adding vertex " << v->id() << endl; else verticesAdded.insert(v); doInit = 1; if (v->dimension() == maxDim) vertexCount++; } if (! 
  // sanity check
  HyperDijkstra d(&optimizer);
  UniformCostFunction f;
  d.shortestPaths(gauge, &f);
  //cerr << PVAR(d.visited().size()) << endl;

  if (d.visited().size() != optimizer.vertices().size()) {
    cerr << CL_RED("Warning: d.visited().size() != optimizer.vertices().size()") << endl;
    cerr << "visited: " << d.visited().size() << endl;
    cerr << "vertices: " << optimizer.vertices().size() << endl;
  }

  if (incremental) {
    cerr << CL_RED("# Note: this variant performs batch steps in each time step") << endl;
    cerr << CL_RED("# For a variant which updates the Cholesky factor use the binary g2o_incremental") << endl;
    int incIterations = maxIterations;
    if (! arg.parsedParam("i")) {
      cerr << "# Setting default number of iterations" << endl;
      incIterations = 1;
    }
    int updateDisplayEveryN = updateGraphEachN;
    int maxDim = 0;

    cerr << "# incremental settings" << endl;
    cerr << "#\t solve every " << updateGraphEachN << endl;
    cerr << "#\t iterations  " << incIterations << endl;

    SparseOptimizer::VertexIDMap vertices = optimizer.vertices();
    for (SparseOptimizer::VertexIDMap::const_iterator it = vertices.begin(); it != vertices.end(); ++it) {
      const SparseOptimizer::Vertex* v = static_cast<const SparseOptimizer::Vertex*>(it->second);
      maxDim = max(maxDim, v->dimension());
    }

    vector<SparseOptimizer::Edge*> edges;
    for (SparseOptimizer::EdgeSet::iterator it = optimizer.edges().begin(); it != optimizer.edges().end(); ++it) {
      SparseOptimizer::Edge* e = dynamic_cast<SparseOptimizer::Edge*>(*it);
      edges.push_back(e);
    }
    optimizer.edges().clear();
    optimizer.vertices().clear();
    optimizer.setVerbose(false);

    // sort the edges in a way that inserting them makes sense
    sort(edges.begin(), edges.end(), IncrementalEdgesCompare());

    double cumTime = 0.;
    int vertexCount = 0;
    int lastOptimizedVertexCount = 0;
    int lastVisUpdateVertexCount = 0;
    bool freshlyOptimized = false;
    bool firstRound = true;
    HyperGraph::VertexSet verticesAdded;
    HyperGraph::EdgeSet edgesAdded;
    for (vector<SparseOptimizer::Edge*>::iterator it = edges.begin(); it != edges.end(); ++it) {
      SparseOptimizer::Edge* e = *it;

      int doInit = 0;
      SparseOptimizer::Vertex* v1 = optimizer.vertex(e->vertices()[0]->id());
      SparseOptimizer::Vertex* v2 = optimizer.vertex(e->vertices()[1]->id());

      if (! v1) {
        SparseOptimizer::Vertex* v = v1 = dynamic_cast<SparseOptimizer::Vertex*>(e->vertices()[0]);
        bool v1Added = optimizer.addVertex(v);
        //cerr << "adding" << v->id() << "(" << v->dimension() << ")" << endl;
        assert(v1Added);
        if (! v1Added)
          cerr << "Error adding vertex " << v->id() << endl;
        else
          verticesAdded.insert(v);
        doInit = 1;
        if (v->dimension() == maxDim)
          vertexCount++;
      }

      if (! v2) {
        SparseOptimizer::Vertex* v = v2 = dynamic_cast<SparseOptimizer::Vertex*>(e->vertices()[1]);
        bool v2Added = optimizer.addVertex(v);
        //cerr << "adding" << v->id() << "(" << v->dimension() << ")" << endl;
        assert(v2Added);
        if (! v2Added)
          cerr << "Error adding vertex " << v->id() << endl;
        else
          verticesAdded.insert(v);
        doInit = 2;
        if (v->dimension() == maxDim)
          vertexCount++;
      }

      // adding the edge and initialization of the vertices
      {
        //cerr << " adding edge " << e->vertices()[0]->id() << " " << e->vertices()[1]->id() << endl;
        if (! optimizer.addEdge(e)) {
          cerr << "Unable to add edge " << e->vertices()[0]->id() << " -> " << e->vertices()[1]->id() << endl;
        } else {
          edgesAdded.insert(e);
        }

        if (doInit) {
          OptimizableGraph::Vertex* from = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
          OptimizableGraph::Vertex* to = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
          switch (doInit) {
            case 1: // initialize v1 from v2
              {
                HyperGraph::VertexSet toSet;
                toSet.insert(to);
                if (e->initialEstimatePossible(toSet, from) > 0.) {
                  //cerr << "init: "
                  //<< to->id() << "(" << to->dimension() << ") -> "
                  //<< from->id() << "(" << from->dimension() << ") " << endl;
                  e->initialEstimate(toSet, from);
                } else {
                  assert(0 && "Added an uninitialized variable to the graph");
                }
                break;
              }
            case 2: // initialize v2 from v1
              {
                HyperGraph::VertexSet fromSet;
                fromSet.insert(from);
                if (e->initialEstimatePossible(fromSet, to) > 0.) {
                  //cerr << "init: "
                  //<< from->id() << "(" << from->dimension() << ") -> "
                  //<< to->id() << "(" << to->dimension() << ") " << endl;
                  e->initialEstimate(fromSet, to);
                } else {
                  assert(0 && "Added an uninitialized variable to the graph");
                }
                break;
              }
            default:
              cerr << "doInit has an unexpected value\n";
          }
        }
      }

      freshlyOptimized = false;
      {
        //cerr << "Optimize" << endl;
        if (vertexCount - lastOptimizedVertexCount >= updateGraphEachN) {
          if (firstRound) {
            if (!optimizer.initializeOptimization()) {
              cerr << "initialization failed" << endl;
              return 0;
            }
          } else {
            if (! optimizer.updateInitialization(verticesAdded, edgesAdded)) {
              cerr << "updating initialization failed" << endl;
              return 0;
            }
          }
          verticesAdded.clear();
          edgesAdded.clear();
          double ts = get_monotonic_time();
          int currentIt = optimizer.optimize(incIterations, !firstRound);
          double dts = get_monotonic_time() - ts;
          cumTime += dts;
          firstRound = false;
          //optimizer->setOptimizationTime(cumTime);
          if (verbose) {
            double chi2 = optimizer.chi2();
            cerr << "nodes= " << optimizer.vertices().size()
                 << "\t edges= " << optimizer.edges().size()
                 << "\t chi2= " << chi2
                 << "\t time= " << dts
                 << "\t iterations= " << currentIt
                 << "\t cumTime= " << cumTime << endl;
          }
          lastOptimizedVertexCount = vertexCount;
          freshlyOptimized = true;

          if (guiOut) {
            if (vertexCount - lastVisUpdateVertexCount >= updateDisplayEveryN) {
              dumpEdges(cout, optimizer);
              lastVisUpdateVertexCount = vertexCount;
            }
          }
        }

        if (! verbose)
          cerr << ".";
      }
    } // for all edges
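    // Note on the loop above: as used here, initialEstimatePossible() returns a
    // positive score when the edge can compute an estimate for the not yet
    // initialized vertex from the already inserted one, and initialEstimate()
    // then seeds that vertex (typically by composing the measurement with the
    // known vertex), so the incremental optimization never starts from an
    // arbitrary state.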
    if (! freshlyOptimized) {
      double ts = get_monotonic_time();
      int currentIt = optimizer.optimize(incIterations, !firstRound);
      double dts = get_monotonic_time() - ts;
      cumTime += dts;
      //optimizer->setOptimizationTime(cumTime);
      if (verbose) {
        double chi2 = optimizer.chi2();
        cerr << "nodes= " << optimizer.vertices().size()
             << "\t edges= " << optimizer.edges().size()
             << "\t chi2= " << chi2
             << "\t time= " << dts
             << "\t iterations= " << currentIt
             << "\t cumTime= " << cumTime << endl;
      }
    }

  } else {

    // BATCH optimization
    if (statsFile != "") {
      // allocate buffer for statistics
      optimizer.setComputeBatchStatistics(true);
    }
    optimizer.initializeOptimization();
    optimizer.computeActiveErrors();
    double loadChi = optimizer.chi2();
    cerr << "Initial chi2 = " << FIXED(loadChi) << endl;

    if (initialGuess) {
      optimizer.computeInitialGuess();
    } else if (initialGuessOdometry) {
      EstimatePropagatorCostOdometry costFunction(&optimizer);
      optimizer.computeInitialGuess(costFunction);
    }
    double initChi = optimizer.chi2();

    signal(SIGINT, sigquit_handler);
    int result = optimizer.optimize(maxIterations);
    if (maxIterations > 0 && result == OptimizationAlgorithm::Fail) {
      cerr << "Cholesky failed, result might be invalid" << endl;
    } else if (computeMarginals) {
      std::vector<std::pair<int, int> > blockIndices;
      for (size_t i = 0; i < optimizer.activeVertices().size(); i++) {
        OptimizableGraph::Vertex* v = optimizer.activeVertices()[i];
        if (v->hessianIndex() >= 0) {
          blockIndices.push_back(make_pair(v->hessianIndex(), v->hessianIndex()));
        }
        if (v->hessianIndex() > 0) {
          blockIndices.push_back(make_pair(v->hessianIndex()-1, v->hessianIndex()));
        }
      }
      SparseBlockMatrix<MatrixXd> spinv;
      if (optimizer.computeMarginals(spinv, blockIndices)) {
        for (size_t i = 0; i < optimizer.activeVertices().size(); i++) {
          OptimizableGraph::Vertex* v = optimizer.activeVertices()[i];
          cerr << "Vertex id:" << v->id() << endl;
          if (v->hessianIndex() >= 0) {
            cerr << "inv block :" << v->hessianIndex() << ", " << v->hessianIndex() << endl;
            cerr << *(spinv.block(v->hessianIndex(), v->hessianIndex()));
            cerr << endl;
          }
          if (v->hessianIndex() > 0) {
            cerr << "inv block :" << v->hessianIndex()-1 << ", " << v->hessianIndex() << endl;
            cerr << *(spinv.block(v->hessianIndex()-1, v->hessianIndex()));
            cerr << endl;
          }
        }
      }
    }
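    // The blocks printed above are selected sub-blocks of the inverse Hessian of
    // the active sub-graph: block (i, i) approximates the marginal covariance of
    // the vertex with hessianIndex i, and block (i-1, i) the cross covariance
    // with its predecessor in the ordering (printed for testing only, as the
    // command line help states).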
    optimizer.computeActiveErrors();
    double finalChi = optimizer.chi2();

    if (summaryFile != "") {
      PropertyMap summary;
      summary.makeProperty<StringProperty>("filename", inputFilename);
      summary.makeProperty<IntProperty>("n_vertices", optimizer.vertices().size());
      summary.makeProperty<IntProperty>("n_edges", optimizer.edges().size());

      int nLandmarks = 0;
      int nPoses = 0;
      int maxDim = *vertexDimensions.rbegin();
      for (HyperGraph::VertexIDMap::iterator it = optimizer.vertices().begin(); it != optimizer.vertices().end(); it++) {
        OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
        if (v->dimension() != maxDim) {
          nLandmarks++;
        } else {
          nPoses++;
        }
      }
      set<string> edgeTypes;
      for (HyperGraph::EdgeSet::iterator it = optimizer.edges().begin(); it != optimizer.edges().end(); it++) {
        edgeTypes.insert(Factory::instance()->tag(*it));
      }
      stringstream edgeTypesString;
      for (std::set<string>::iterator it = edgeTypes.begin(); it != edgeTypes.end(); it++) {
        edgeTypesString << *it << " ";
      }

      summary.makeProperty<IntProperty>("n_poses", nPoses);
      summary.makeProperty<IntProperty>("n_landmarks", nLandmarks);
      summary.makeProperty<StringProperty>("edge_types", edgeTypesString.str());
      summary.makeProperty<DoubleProperty>("load_chi", loadChi);
      summary.makeProperty<StringProperty>("solver", strSolver);
      summary.makeProperty<BoolProperty>("robustKernel", robustKernel.size() > 0);
      summary.makeProperty<DoubleProperty>("init_chi", initChi);
      summary.makeProperty<DoubleProperty>("final_chi", finalChi);
      summary.makeProperty<IntProperty>("maxIterations", maxIterations);
      summary.makeProperty<IntProperty>("realIterations", result);
      ofstream os;
      os.open(summaryFile.c_str(), ios::app);
      summary.writeToCSV(os);
    }

    if (statsFile != "") {
      cerr << "writing stats to file \"" << statsFile << "\" ... ";
      ofstream os(statsFile.c_str());
      const BatchStatisticsContainer& bsc = optimizer.batchStatistics();
      // guard against early termination producing fewer statistics entries than maxIterations
      for (size_t i = 0; i < bsc.size() && (int) i < maxIterations; ++i) {
        os << bsc[i] << endl;
      }
      cerr << "done." << endl;
    }
  }

  // saving again
  if (gnudump.size() > 0) {
    bool gnuPlotStatus = saveGnuplot(gnudump, optimizer);
    if (! gnuPlotStatus) {
      cerr << "Error while writing gnuplot files" << endl;
    }
  }

  if (outputfilename.size() > 0) {
    if (outputfilename == "-") {
      cerr << "saving to stdout";
      optimizer.save(cout);
    } else {
      cerr << "saving " << outputfilename << " ... ";
      optimizer.save(outputfilename.c_str());
    }
    cerr << "done." << endl;
  }

  // destroy all the singletons
  //Factory::destroy();
  //OptimizationAlgorithmFactory::destroy();
  //HyperGraphActionLibrary::destroy();
  return 0;
}
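// Exit codes used above (collected from the return statements for convenience):
//   0  success, but also some early exits (no input given, solver allocation failed)
//   1  the input file could not be opened, or the graph contains no vertices
//   2  error while loading the graph, or no gauge vertex could be found
//   3  the selected solver cannot handle the vertex dimensions in the graph
//  -1  a vertex id given via -gaugeList does not exist in the graph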