bool SparseOptimizer::updateInitialization(HyperGraph::VertexSet& vset, HyperGraph::EdgeSet& eset) { std::vector<HyperGraph::Vertex*> newVertices; newVertices.reserve(vset.size()); _activeVertices.reserve(_activeVertices.size() + vset.size()); //for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) //_activeVertices.push_back(static_cast<OptimizableGraph::Vertex*>(*it)); _activeEdges.reserve(_activeEdges.size() + eset.size()); for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) _activeEdges.push_back(static_cast<OptimizableGraph::Edge*>(*it)); // update the index mapping size_t next = _ivMap.size(); for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) { OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(*it); if (! v->fixed()){ if (! v->marginalized()){ v->setTempIndex(next); _ivMap.push_back(v); newVertices.push_back(v); _activeVertices.push_back(v); next++; } else // not supported right now abort(); } else { v->setTempIndex(-1); } } //if (newVertices.size() != vset.size()) //cerr << __PRETTY_FUNCTION__ << ": something went wrong " << PVAR(vset.size()) << " " << PVAR(newVertices.size()) << endl; return _solver->updateStructure(newVertices, eset); }
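// Illustrative usage sketch (editor's addition, not library code): how updateInitialization()
// above is typically paired with initializeOptimization() when the graph grows incrementally,
// mirroring the incremental loop of the command line front-end further below. The sets
// "verticesAdded" and "edgesAdded" are assumed to be filled by the caller with the elements
// inserted since the last update; the surrounding g2o namespace and headers are assumed.
void exampleIncrementalUpdate(SparseOptimizer& optimizer, HyperGraph::VertexSet& verticesAdded, HyperGraph::EdgeSet& edgesAdded, bool firstRound)
{
  if (firstRound) {
    if (!optimizer.initializeOptimization()) {    // batch initialization of the first chunk
      cerr << "initialization failed" << endl;
      return;
    }
  } else {
    if (!optimizer.updateInitialization(verticesAdded, edgesAdded)) {  // extend the existing system
      cerr << "updating initialization failed" << endl;
      return;
    }
  }
  optimizer.optimize(1, !firstRound);             // online=true reuses the allocated structures
  verticesAdded.clear();
  edgesAdded.clear();
}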
void starsInEdge(StarSet& stars, HyperGraph::Edge* e, EdgeStarMap& esmap, HyperGraph::VertexSet& gauge){ for (size_t i=0; i<e->vertices().size(); i++){ OptimizableGraph::Vertex* v=(OptimizableGraph::Vertex*)e->vertices()[i]; if (gauge.find(v)==gauge.end()) starsInVertex(stars, v, esmap); } }
void OptimizableGraph::push(HyperGraph::VertexSet& vset) { for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); it++) { OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it); v->push(); } }
void HyperDijkstra::shortestPaths(HyperGraph::Vertex* v, HyperDijkstra::CostFunction* cost, double maxDistance, double comparisonConditioner, bool directed, double maxEdgeCost) { HyperGraph::VertexSet vset; vset.insert(v); shortestPaths(vset, cost, maxDistance, comparisonConditioner, directed, maxEdgeCost); }
void OptimizableGraph::setFixed(HyperGraph::VertexSet& vset, bool fixed) { for (HyperGraph::VertexSet::iterator it=vset.begin(); it!=vset.end(); ++it) { OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it); v->setFixed(fixed); } }
void OptimizableGraph::discardTop(HyperGraph::VertexSet& vset) { for (HyperGraph::VertexSet::iterator it=vset.begin(); it!=vset.end(); ++it) { OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it); v->discardTop(); } }
bool saveGnuplot(const std::string& gnudump, const OptimizableGraph& optimizer) { HyperGraph::VertexSet vset; for (HyperGraph::VertexIDMap::const_iterator it=optimizer.vertices().begin(); it!=optimizer.vertices().end(); it++){ vset.insert(it->second); } return saveGnuplot(gnudump, vset, optimizer.edges()); }
void SparseOptimizer::push(HyperGraph::VertexSet& vlist) { for (HyperGraph::VertexSet::iterator it = vlist.begin(); it != vlist.end(); ++it) { OptimizableGraph::Vertex* v = dynamic_cast<OptimizableGraph::Vertex*>(*it); if (v) v->push(); else cerr << __FUNCTION__ << ": FATAL PUSH SET" << endl; } }
void SparseOptimizer::pop(HyperGraph::VertexSet& vlist) { for (HyperGraph::VertexSet::iterator it = vlist.begin(); it != vlist.end(); ++it){ OptimizableGraph::Vertex* v = dynamic_cast<OptimizableGraph::Vertex*> (*it); if (v) v->pop(); else cerr << "FATAL POP SET" << endl; } }
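// Illustrative usage sketch (editor's addition, not library code): push()/pop() bracket a
// temporary optimization so that the original estimates can be restored afterwards, in the
// same way the star construction code below handles its backbone vertices. The vertex and
// edge subsets and the gauge set are assumed to be prepared by the caller.
void exampleTemporaryOptimization(SparseOptimizer& optimizer, HyperGraph::VertexSet& subsetVertices, HyperGraph::EdgeSet& subsetEdges, HyperGraph::VertexSet& gaugeSet)
{
  optimizer.push(subsetVertices);                 // back up the current estimates
  optimizer.setFixed(gaugeSet, true);             // fix a gauge to remove the free degrees of freedom
  optimizer.initializeOptimization(subsetEdges);
  optimizer.optimize(10);
  // ... read out or copy the refined estimates here ...
  optimizer.setFixed(gaugeSet, false);
  optimizer.pop(subsetVertices);                  // restore the backed-up estimates
}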
void HyperDijkstra::shortestPaths(HyperGraph::VertexSet& vset, HyperDijkstra::CostFunction* cost, double maxDistance, double comparisonConditioner, bool directed, double maxEdgeCost) { reset(); std::priority_queue< AdjacencyMapEntry > frontier; for (HyperGraph::VertexSet::iterator vit=vset.begin(); vit!=vset.end(); ++vit){ HyperGraph::Vertex* v=*vit; AdjacencyMap::iterator it=_adjacencyMap.find(v); assert(it!=_adjacencyMap.end()); it->second._distance=0.; it->second._parent=0; frontier.push(it->second); } while(! frontier.empty()){ AdjacencyMapEntry entry=frontier.top(); frontier.pop(); HyperGraph::Vertex* u=entry.child(); AdjacencyMap::iterator ut=_adjacencyMap.find(u); assert(ut!=_adjacencyMap.end()); double uDistance=ut->second.distance(); std::pair< HyperGraph::VertexSet::iterator, bool> insertResult=_visited.insert(u); (void) insertResult; HyperGraph::EdgeSet::iterator et=u->edges().begin(); while (et != u->edges().end()){ HyperGraph::Edge* edge=*et; ++et; if (directed && edge->vertex(0) != u) continue; for (size_t i = 0; i < edge->vertices().size(); ++i) { HyperGraph::Vertex* z = edge->vertex(i); if (z == u) continue; double edgeDistance=(*cost)(edge, u, z); if (edgeDistance==std::numeric_limits< double >::max() || edgeDistance > maxEdgeCost) continue; double zDistance=uDistance+edgeDistance; //cerr << z->id() << " " << zDistance << endl; AdjacencyMap::iterator ot=_adjacencyMap.find(z); assert(ot!=_adjacencyMap.end()); if (zDistance+comparisonConditioner<ot->second.distance() && zDistance<maxDistance){ ot->second._distance=zDistance; ot->second._parent=u; ot->second._edge=edge; frontier.push(ot->second); } } } } }
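// Illustrative usage sketch (editor's addition, not library code): the typical driving
// pattern for HyperDijkstra as used by the routines further below -- compute the shortest
// paths from a root vertex, turn the adjacency map into a spanning tree, and visit that
// tree with an action. "graph", "root" and "treeAction" are assumed to be supplied by the
// caller; UniformCostFunction assigns a unit cost to every edge.
void exampleSpanningTreeVisit(OptimizableGraph* graph, HyperGraph::Vertex* root, HyperDijkstra::TreeAction* treeAction)
{
  UniformCostFunction uniformCost;
  HyperDijkstra dijkstra(graph);
  dijkstra.shortestPaths(root, &uniformCost);                 // fill the adjacency map with distances and parents
  HyperDijkstra::computeTree(dijkstra.adjacencyMap());        // link every entry to its parent
  HyperDijkstra::visitAdjacencyMap(dijkstra.adjacencyMap(), treeAction);  // breadth-first visit from the root
}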
bool SparseOptimizer::initializeOptimization(int level) { HyperGraph::VertexSet vset; for (VertexIDMap::iterator it = vertices().begin(); it != vertices().end(); it++) vset.insert(it->second); return initializeOptimization(vset,level); }
void SparseOptimizer::computeInitialGuess(EstimatePropagatorCost& costFunction) { OptimizableGraph::VertexSet emptySet; std::set<Vertex*> backupVertices; HyperGraph::VertexSet fixedVertices; // these are the root nodes where to start the initialization for (EdgeContainer::iterator it = _activeEdges.begin(); it != _activeEdges.end(); ++it) { OptimizableGraph::Edge* e = *it; for (size_t i = 0; i < e->vertices().size(); ++i) { OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(e->vertex(i)); if (!v) continue; if (v->fixed()) fixedVertices.insert(v); else { // check for having a prior which is able to fully initialize a vertex for (EdgeSet::const_iterator vedgeIt = v->edges().begin(); vedgeIt != v->edges().end(); ++vedgeIt) { OptimizableGraph::Edge* vedge = static_cast<OptimizableGraph::Edge*>(*vedgeIt); if (vedge->vertices().size() == 1 && vedge->initialEstimatePossible(emptySet, v) > 0.) { //cerr << "Initialize with prior for " << v->id() << endl; vedge->initialEstimate(emptySet, v); fixedVertices.insert(v); } } } if (v->hessianIndex() == -1) { std::set<Vertex*>::const_iterator foundIt = backupVertices.find(v); if (foundIt == backupVertices.end()) { v->push(); backupVertices.insert(v); } } } } EstimatePropagator estimatePropagator(this); estimatePropagator.propagate(fixedVertices, costFunction); // restoring the vertices that should not be initialized for (std::set<Vertex*>::iterator it = backupVertices.begin(); it != backupVertices.end(); ++it) { Vertex* v = *it; v->pop(); } if (verbose()) { computeActiveErrors(); cerr << "iteration= -1\t chi2= " << activeChi2() << "\t time= 0.0" << "\t cumTime= 0.0" << "\t (using initial guess from " << costFunction.name() << ")" << endl; } }
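// Illustrative usage sketch (editor's addition, not library code): computeInitialGuess()
// propagates estimates outward from the fixed vertices (and from vertices that own a unary
// prior edge), using the supplied cost function to decide the propagation order. This
// mirrors the batch branch of the command line front-end further below, which optionally
// prefers odometry edges over loop closures.
void exampleInitialGuess(SparseOptimizer& optimizer, bool preferOdometry)
{
  optimizer.initializeOptimization();
  if (preferOdometry) {
    EstimatePropagatorCostOdometry costFunction(&optimizer);  // propagate along odometry edges first
    optimizer.computeInitialGuess(costFunction);
  } else {
    optimizer.computeInitialGuess();                          // default spanning-tree based guess
  }
  optimizer.optimize(10);
}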
int main(int argc, char** argv) { CommandArgs arg; std::string outputFilename; std::string inputFilename; arg.param("o", outputFilename, "", "output file name"); arg.paramLeftOver("input-filename ", inputFilename, "", "graph file to read", true); arg.parseArgs(argc, argv); OptimizableGraph graph; if (!graph.load(inputFilename.c_str())){ cerr << "Error: cannot load a file from \"" << inputFilename << "\", aborting." << endl; return 0; } HyperGraph::EdgeSet removedEdges; HyperGraph::VertexSet removedVertices; for (HyperGraph::EdgeSet::iterator it = graph.edges().begin(); it!=graph.edges().end(); it++) { HyperGraph::Edge* e = *it; EdgeSE2PointXY* edgePointXY = dynamic_cast<EdgeSE2PointXY*>(e); if (edgePointXY) { VertexSE2* pose = dynamic_cast<VertexSE2*>(edgePointXY->vertex(0)); VertexPointXY* landmark = dynamic_cast<VertexPointXY*>(edgePointXY->vertex(1)); FeaturePointXYData * feature = new FeaturePointXYData(); feature->setPositionMeasurement(edgePointXY->measurement()); feature->setPositionInformation(edgePointXY->information()); pose->addUserData(feature); removedEdges.insert(edgePointXY); removedVertices.insert(landmark); } } for (HyperGraph::EdgeSet::iterator it = removedEdges.begin(); it!=removedEdges.end(); it++){ OptimizableGraph::Edge* e = dynamic_cast<OptimizableGraph::Edge*>(*it); graph.removeEdge(e); } for (HyperGraph::VertexSet::iterator it = removedVertices.begin(); it!=removedVertices.end(); it++){ OptimizableGraph::Vertex* v = dynamic_cast<OptimizableGraph::Vertex*>(*it); graph.removeVertex(v); } if (outputFilename.length()){ graph.save(outputFilename.c_str()); } }
void HyperDijkstra::connectedSubset(HyperGraph::VertexSet& connected, HyperGraph::VertexSet& visited, HyperGraph::VertexSet& startingSet, HyperGraph* g, HyperGraph::Vertex* v, HyperDijkstra::CostFunction* cost, double distance, double comparisonConditioner, double maxEdgeCost) { typedef std::queue<HyperGraph::Vertex*> VertexDeque; visited.clear(); connected.clear(); VertexDeque frontier; HyperDijkstra dv(g); connected.insert(v); frontier.push(v); while (! frontier.empty()) { HyperGraph::Vertex* v0=frontier.front(); frontier.pop(); dv.shortestPaths(v0, cost, distance, comparisonConditioner, false, maxEdgeCost); for (HyperGraph::VertexSet::iterator it=dv.visited().begin(); it!=dv.visited().end(); ++it) { visited.insert(*it); if (startingSet.find(*it)==startingSet.end()) continue; std::pair<HyperGraph::VertexSet::iterator, bool> insertOutcome=connected.insert(*it); if (insertOutcome.second) { // the node was not in the connectedSet; frontier.push(dynamic_cast<HyperGraph::Vertex*>(*it)); } } } }
bool SparseOptimizer::initializeOptimization (HyperGraph::VertexSet& vset, int level) { // Iterates over all the vertices that were added to the optimizer. // For each vertex 'V' it collects the edges it takes part in. // For each of those edges, it checks whether all of its vertices are in the // optimizer. If they are, the edge is added to _activeEdges. // If vertex 'V' has at least one edge whose vertices are all in the // optimizer, 'V' is added to _activeVertices. // Finally, internal indices are assigned to the vertices: // -1: fixed vertices // 0..n: non-fixed and NOT marginalizable vertices // n+1..m: non-fixed and marginalizable vertices clearIndexMapping(); _activeVertices.clear(); _activeVertices.reserve(vset.size()); _activeEdges.clear(); set<Edge*> auxEdgeSet; // temporary structure to avoid duplicates for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); it++) { OptimizableGraph::Vertex* v= (OptimizableGraph::Vertex*) *it; const OptimizableGraph::EdgeSet& vEdges=v->edges(); // count if there are edges in that level. If not remove from the pool int levelEdges=0; for (OptimizableGraph::EdgeSet::const_iterator it = vEdges.begin(); it != vEdges.end(); it++) { OptimizableGraph::Edge* e = reinterpret_cast<OptimizableGraph::Edge*>(*it); if (level < 0 || e->level() == level) { bool allVerticesOK = true; for (vector<HyperGraph::Vertex*>::const_iterator vit = e->vertices().begin(); vit != e->vertices().end(); ++vit) { if (vset.find(*vit) == vset.end()) { allVerticesOK = false; break; } } if (allVerticesOK) { auxEdgeSet.insert(reinterpret_cast<OptimizableGraph::Edge*>(*it)); levelEdges++; } } } if (levelEdges) _activeVertices.push_back(v); } _activeEdges.reserve(auxEdgeSet.size()); for (set<Edge*>::iterator it = auxEdgeSet.begin(); it != auxEdgeSet.end(); ++it) _activeEdges.push_back(*it); sortVectorContainers(); return buildIndexMapping(_activeVertices); }
bool saveGnuplot(const std::string& gnudump, const HyperGraph::VertexSet& vertices, const HyperGraph::EdgeSet& edges) { // seek for an action whose name is writeGnuplot in the library HyperGraphElementAction* saveGnuplot = HyperGraphActionLibrary::instance()->actionByName("writeGnuplot"); if (! saveGnuplot ){ cerr << __PRETTY_FUNCTION__ << ": no action \"writeGnuplot\" registered" << endl; return false; } WriteGnuplotAction::Parameters params; int maxDim = -1; int minDim = numeric_limits<int>::max(); for (HyperGraph::VertexSet::const_iterator it = vertices.begin(); it != vertices.end(); ++it){ OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it); int vdim = v->dimension(); maxDim = (std::max)(vdim, maxDim); minDim = (std::min)(vdim, minDim); } string extension = getFileExtension(gnudump); if (extension.size() == 0) extension = "dat"; string baseFilename = getPureFilename(gnudump); // check for odometry edges bool hasOdomEdge = false; bool hasLandmarkEdge = false; for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) { OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it); if (e->vertices().size() == 2) { if (edgeAllVertsSameDim(e, maxDim)) hasOdomEdge = true; else hasLandmarkEdge = true; } if (hasOdomEdge && hasLandmarkEdge) break; } bool fileStatus = true; if (hasOdomEdge) { string odomFilename = baseFilename + "_odom_edges." + extension; cerr << "# saving " << odomFilename << " ... "; ofstream fout(odomFilename.c_str()); if (! fout) { cerr << "Unable to open file" << endl; return false; } params.os = &fout; // writing odometry edges for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) { OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it); if (e->vertices().size() != 2 || ! edgeAllVertsSameDim(e, maxDim)) continue; (*saveGnuplot)(e, &params); } cerr << "done." << endl; } if (hasLandmarkEdge) { string filename = baseFilename + "_landmarks_edges." + extension; cerr << "# saving " << filename << " ... "; ofstream fout(filename.c_str()); if (! fout) { cerr << "Unable to open file" << endl; return false; } params.os = &fout; // writing landmark edges for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) { OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it); if (e->vertices().size() != 2 || edgeAllVertsSameDim(e, maxDim)) continue; (*saveGnuplot)(e, &params); } cerr << "done." << endl; } if (1) { string filename = baseFilename + "_edges." + extension; cerr << "# saving " << filename << " ... "; ofstream fout(filename.c_str()); if (! fout) { cerr << "Unable to open file" << endl; return false; } params.os = &fout; // writing all edges for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) { OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it); (*saveGnuplot)(e, &params); } cerr << "done." << endl; } if (1) { string filename = baseFilename + "_vertices." + extension; cerr << "# saving " << filename << " ... "; ofstream fout(filename.c_str()); if (! fout) { cerr << "Unable to open file" << endl; return false; } params.os = &fout; for (HyperGraph::VertexSet::const_iterator it = vertices.begin(); it != vertices.end(); ++it){ OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it); (*saveGnuplot)(v, &params); } cerr << "done." << endl; } return fileStatus; }
void computeSimpleStars(StarSet& stars, SparseOptimizer* optimizer, EdgeLabeler* labeler, EdgeCreator* creator, OptimizableGraph::Vertex* gauge_, std::string edgeTag, std::string vertexTag, int level, int step, int backboneIterations, int starIterations, double rejectionThreshold, bool debug){ cerr << "performing the tree actions" << endl; HyperDijkstra d(optimizer); // compute a spanning tree based on the types of edges and vertices in the pool EdgeTypesCostFunction f(edgeTag, vertexTag, level); d.shortestPaths(gauge_, &f, std::numeric_limits< double >::max(), 1e-6, false, std::numeric_limits< double >::max()/2); HyperDijkstra::computeTree(d.adjacencyMap()); // constructs the stars on the backbone BackBoneTreeAction bact(optimizer, vertexTag, level, step); bact.init(); cerr << "free edges size " << bact.freeEdges().size() << endl; // perform a breadth-first visit of the tree and create the stars on the backbone d.visitAdjacencyMap(d.adjacencyMap(),&bact,true); stars.clear(); for (VertexStarMultimap::iterator it=bact.vertexStarMultiMap().begin(); it!=bact.vertexStarMultiMap().end(); it++){ stars.insert(it->second); } cerr << "stars.size: " << stars.size() << endl; cerr << "size: " << bact.vertexStarMultiMap().size() << endl; // for each star // for all vertices in the backbone, select all edges leading/leaving from that vertex // that are contained in freeEdges. // mark the corresponding "open" vertices and add them to a multimap (vertex->star) // select a gauge in the backbone // push all vertices on the backbone // compute an initial guess on the backbone // one round of optimization of the backbone // lock all vertices in the backbone // push all "open" vertices // for each open vertex, // compute an initial guess given the backbone // do some rounds of solveDirect // if (fail) // - remove the vertex and the edges in that vertex from the star // - make the structures consistent // pop all "open" vertices // pop all "vertices" in the backbone // unfix the vertices in the backbone int starNum=0; for (StarSet::iterator it=stars.begin(); it!=stars.end(); it++){ Star* s =*it; HyperGraph::VertexSet backboneVertices = s->_lowLevelVertices; HyperGraph::EdgeSet backboneEdges = s->_lowLevelEdges; if (backboneEdges.empty()) continue; // cerr << "optimizing backbone" << endl; // one of these should be the gauge; to keep it simple we select the first one in the backbone OptimizableGraph::VertexSet gauge; gauge.insert(*backboneVertices.begin()); s->gauge()=gauge; s->optimizer()->push(backboneVertices); s->optimizer()->setFixed(gauge,true); s->optimizer()->initializeOptimization(backboneEdges); s->optimizer()->computeInitialGuess(); s->optimizer()->optimize(backboneIterations); s->optimizer()->setFixed(backboneVertices, true); // cerr << "assigning edges/vertices not in backbone" << endl; HyperGraph::EdgeSet otherEdges; HyperGraph::VertexSet otherVertices; std::multimap<HyperGraph::Vertex*, HyperGraph::Edge*> vemap; for (HyperGraph::VertexSet::iterator bit=backboneVertices.begin(); bit!=backboneVertices.end(); bit++){ HyperGraph::Vertex* v=*bit; for (HyperGraph::EdgeSet::iterator eit=v->edges().begin(); eit!=v->edges().end(); eit++){ OptimizableGraph::Edge* e = (OptimizableGraph::Edge*) *eit; HyperGraph::EdgeSet::iterator feit=bact.freeEdges().find(e); if (feit!=bact.freeEdges().end()){ // edge is admissible otherEdges.insert(e); bact.freeEdges().erase(feit); for (size_t i=0; i<e->vertices().size(); i++){ OptimizableGraph::Vertex* ve= (OptimizableGraph::Vertex*)e->vertices()[i]; if 
(backboneVertices.find(ve)==backboneVertices.end()){ otherVertices.insert(ve); vemap.insert(make_pair(ve,e)); } } } } } // RAINER TODO maybe need a better solution than dynamic casting here?? OptimizationAlgorithmWithHessian* solverWithHessian = dynamic_cast<OptimizationAlgorithmWithHessian*>(s->optimizer()->solver()); if (solverWithHessian) { s->optimizer()->push(otherVertices); // cerr << "optimizing vertices out of bbone" << endl; // cerr << "push" << endl; // cerr << "init" << endl; s->optimizer()->initializeOptimization(otherEdges); // cerr << "guess" << endl; s->optimizer()->computeInitialGuess(); // cerr << "solver init" << endl; s->optimizer()->solver()->init(); // cerr << "structure" << endl; if (!solverWithHessian->buildLinearStructure()) cerr << "FATAL: failure while building linear structure" << endl; // cerr << "errors" << endl; s->optimizer()->computeActiveErrors(); // cerr << "system" << endl; solverWithHessian->updateLinearSystem(); // cerr << "directSolove" << endl; } else { cerr << "FATAL: hierarchical thing cannot be used with a solver that does not support the system structure construction" << endl; } // // then optimize the vertices one at a time to check if a solution is good for (HyperGraph::VertexSet::iterator vit=otherVertices.begin(); vit!=otherVertices.end(); vit++){ OptimizableGraph::Vertex* v=(OptimizableGraph::Vertex*)(*vit); v->solveDirect(); // cerr << " " << d; // if a solution is found, add a vertex and all the edges in //othervertices that are pointing to that edge to the star s->_lowLevelVertices.insert(v); for (HyperGraph::EdgeSet::iterator eit=v->edges().begin(); eit!=v->edges().end(); eit++){ OptimizableGraph::Edge* e = (OptimizableGraph::Edge*) *eit; if (otherEdges.find(e)!=otherEdges.end()) s->_lowLevelEdges.insert(e); } } //cerr << endl; // relax the backbone and optimize it all // cerr << "relax bbone" << endl; s->optimizer()->setFixed(backboneVertices, false); //cerr << "fox gauge bbone" << endl; s->optimizer()->setFixed(s->gauge(),true); //cerr << "opt init" << endl; s->optimizer()->initializeOptimization(s->_lowLevelEdges); optimizer->computeActiveErrors(); double initialChi = optimizer->activeChi2(); int starOptResult = s->optimizer()->optimize(starIterations); //cerr << starOptResult << "(" << starIterations << ") " << endl; double finalchi=-1.; cerr << "computing star: " << starNum << endl; int vKept=0, vDropped=0; if (!starIterations || starOptResult > 0 ){ optimizer->computeActiveErrors(); finalchi = optimizer->activeChi2(); #if 1 s->optimizer()->computeActiveErrors(); // cerr << "system" << endl; if (solverWithHessian) solverWithHessian->updateLinearSystem(); HyperGraph::EdgeSet prunedStarEdges = backboneEdges; HyperGraph::VertexSet prunedStarVertices = backboneVertices; for (HyperGraph::VertexSet::iterator vit=otherVertices.begin(); vit!=otherVertices.end(); vit++){ //discard the vertices whose error is too big OptimizableGraph::Vertex* v=(OptimizableGraph::Vertex*)(*vit); MatrixXd h(v->dimension(), v->dimension()); for (int i=0; i<v->dimension(); i++){ for (int j=0; j<v->dimension(); j++) h(i,j)=v->hessian(i,j); } EigenSolver<Eigen::MatrixXd> esolver; esolver.compute(h); VectorXcd ev= esolver.eigenvalues(); double emin = std::numeric_limits<double>::max(); double emax = -std::numeric_limits<double>::max(); for (int i=0; i<ev.size(); i++){ emin = ev(i).real()>emin ? emin : ev(i).real(); emax = ev(i).real()<emax ? 
emax : ev(i).real(); } double d=emin/emax; // cerr << " " << d; if (d>rejectionThreshold){ // if a solution is found, add a vertex and all the edges in //othervertices that are pointing to that edge to the star prunedStarVertices.insert(v); for (HyperGraph::EdgeSet::iterator eit=v->edges().begin(); eit!=v->edges().end(); eit++){ OptimizableGraph::Edge* e = (OptimizableGraph::Edge*) *eit; if (otherEdges.find(e)!=otherEdges.end()) prunedStarEdges.insert(e); } //cerr << "K( " << v->id() << "," << d << ")" ; vKept ++; } else { vDropped++; //cerr << "R( " << v->id() << "," << d << ")" ; } } s->_lowLevelEdges=prunedStarEdges; s->_lowLevelVertices=prunedStarVertices; #endif //cerr << "addHedges" << endl; //now add to the star the hierarchical edges std::vector<OptimizableGraph::Vertex*> vertices(2); vertices[0]= (OptimizableGraph::Vertex*) *s->_gauge.begin(); for (HyperGraph::VertexSet::iterator vit=s->_lowLevelVertices.begin(); vit!=s->_lowLevelVertices.end(); vit++){ OptimizableGraph::Vertex* v=(OptimizableGraph::Vertex*)*vit; vertices[1]=v; if (v==vertices[0]) continue; OptimizableGraph::Edge* e=creator->createEdge(vertices); //rr << "creating edge" << e << Factory::instance()->tag(vertices[0]) << "->" << Factory::instance()->tag(v) <endl; if (e) { e->setLevel(level+1); optimizer->addEdge(e); s->_starEdges.insert(e); } else { cerr << "HERE" << endl; cerr << "FATAL, cannot create edge" << endl; } } } cerr << " gauge: " << (*s->_gauge.begin())->id() << " kept: " << vKept << " dropped: " << vDropped << " edges:" << s->_lowLevelEdges.size() << " hedges" << s->_starEdges.size() << " initial chi " << initialChi << " final chi " << finalchi << endl; if (debug) { char starLowName[100]; sprintf(starLowName, "star-%04d-low.g2o", starNum); ofstream starLowStream(starLowName); optimizer->saveSubset(starLowStream, s->_lowLevelEdges); } bool labelOk=false; if (!starIterations || starOptResult > 0) labelOk = s->labelStarEdges(0, labeler); if (labelOk) { if (debug) { char starHighName[100]; sprintf(starHighName, "star-%04d-high.g2o", starNum); ofstream starHighStream(starHighName); optimizer->saveSubset(starHighStream, s->_starEdges); } } else { cerr << "FAILURE: " << starOptResult << endl; } starNum++; //label each hierarchical edge s->optimizer()->pop(otherVertices); s->optimizer()->pop(backboneVertices); s->optimizer()->setFixed(s->gauge(),false); } StarSet stars2; // now erase the stars that have 0 edges. They r useless for (StarSet::iterator it=stars.begin(); it!=stars.end(); it++){ Star* s=*it; if (s->lowLevelEdges().size()==0) { delete s; } else stars2.insert(s); } stars=stars2; }
void assignHierarchicalEdges(StarSet& stars, EdgeStarMap& esmap, EdgeLabeler* labeler, EdgeCreator* creator, SparseOptimizer* optimizer, int minNumEdges, int maxIterations){ // now construct the hierarchical edges for all the stars int starNum=0; for (StarSet::iterator it=stars.begin(); it!=stars.end(); it++){ cerr << "STAR# " << starNum << endl; Star* s=*it; std::vector<OptimizableGraph::Vertex*> vertices(2); vertices[0]= (OptimizableGraph::Vertex*) *s->_gauge.begin(); cerr << "eIs" << endl; HyperGraph::VertexSet vNew =s->lowLevelVertices(); for (HyperGraph::VertexSet::iterator vit=s->_lowLevelVertices.begin(); vit!=s->_lowLevelVertices.end(); vit++){ OptimizableGraph::Vertex* v=(OptimizableGraph::Vertex*)*vit; vertices[1]=v; if (v==vertices[0]) continue; HyperGraph::EdgeSet eInSt; int numEdges = vertexEdgesInStar(eInSt, v, s, esmap); if (Factory::instance()->tag(v)==Factory::instance()->tag(vertices[0]) || numEdges>minNumEdges) { OptimizableGraph::Edge* e=creator->createEdge(vertices); //cerr << "creating edge" << e << endl; if (e) { e->setLevel(1); optimizer->addEdge(e); s->_starEdges.insert(e); } else { cerr << "THERE" << endl; cerr << "FATAL, cannot create edge" << endl; } } else { vNew.erase(v); // cerr << numEdges << " "; // cerr << "r " << v-> id() << endl; // remove from the star all edges that are not sufficiently connected for (HyperGraph::EdgeSet::iterator it=eInSt.begin(); it!=eInSt.end(); it++){ HyperGraph::Edge* e=*it; s->lowLevelEdges().erase(e); } } } s->lowLevelVertices()=vNew; //cerr << endl; cerr << "gauge: " << (*s->_gauge.begin())->id() << " edges:" << s->_lowLevelEdges.size() << " hedges" << s->_starEdges.size() << endl; const bool debug = false; if (debug){ char starLowName[100]; sprintf(starLowName, "star-%04d-low.g2o", starNum); ofstream starLowStream(starLowName); optimizer->saveSubset(starLowStream, s->_lowLevelEdges); } bool labelOk=s->labelStarEdges(maxIterations, labeler); if (labelOk) { if (debug) { char starHighName[100]; sprintf(starHighName, "star-%04d-high.g2o", starNum); ofstream starHighStream(starHighName); optimizer->saveSubset(starHighStream, s->_starEdges); } } else { cerr << "FAILURE" << endl; } starNum++; } }
bool G2oSlamInterface::addEdge(const std::string& tag, int id, int dimension, int v1Id, int v2Id, const std::vector<double>& measurement, const std::vector<double>& information) { (void) tag; (void) id; size_t oldEdgesSize = _optimizer->edges().size(); if (dimension == 3) { SE2 transf(measurement[0], measurement[1], measurement[2]); Eigen::Matrix3d infMat; int idx = 0; for (int r = 0; r < 3; ++r) for (int c = r; c < 3; ++c, ++idx) { assert(idx < (int)information.size()); infMat(r,c) = infMat(c,r) = information[idx]; } //cerr << PVAR(infMat) << endl; int doInit = 0; SparseOptimizer::Vertex* v1 = _optimizer->vertex(v1Id); SparseOptimizer::Vertex* v2 = _optimizer->vertex(v2Id); if (! v1) { OptimizableGraph::Vertex* v = v1 = addVertex(dimension, v1Id); _verticesAdded.insert(v); doInit = 1; ++_nodesAdded; } if (! v2) { OptimizableGraph::Vertex* v = v2 = addVertex(dimension, v2Id); _verticesAdded.insert(v); doInit = 2; ++_nodesAdded; } if (_optimizer->edges().size() == 0) { cerr << "FIRST EDGE "; if (v1->id() < v2->id()) { cerr << "fixing " << v1->id() << endl; v1->setFixed(true); } else { cerr << "fixing " << v2->id() << endl; v2->setFixed(true); } } OnlineEdgeSE2* e = new OnlineEdgeSE2; e->vertices()[0] = v1; e->vertices()[1] = v2; e->setMeasurement(transf); e->setInformation(infMat); _optimizer->addEdge(e); _edgesAdded.insert(e); if (doInit) { OptimizableGraph::Vertex* from = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]); OptimizableGraph::Vertex* to = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]); switch (doInit){ case 1: // initialize v1 from v2 { HyperGraph::VertexSet toSet; toSet.insert(to); if (e->initialEstimatePossible(toSet, from) > 0.) { e->initialEstimate(toSet, from); } break; } case 2: { HyperGraph::VertexSet fromSet; fromSet.insert(from); if (e->initialEstimatePossible(fromSet, to) > 0.) { e->initialEstimate(fromSet, to); } break; } default: cerr << "doInit wrong value\n"; } } } else if (dimension == 6) { Eigen::Isometry3d transf; Matrix<double, 6, 6> infMat; if (measurement.size() == 7) { // measurement is a Quaternion Vector7d meas; for (int i=0; i<7; ++i) meas(i) = measurement[i]; // normalize the quaternion to recover numerical precision lost by storing as human readable text Vector4d::MapType(meas.data()+3).normalize(); transf = internal::fromVectorQT(meas); for (int i = 0, idx = 0; i < infMat.rows(); ++i) for (int j = i; j < infMat.cols(); ++j){ infMat(i,j) = information[idx++]; if (i != j) infMat(j,i)=infMat(i,j); } } else { // measurement consists of Euler angles Vector6d aux; aux << measurement[0], measurement[1], measurement[2],measurement[3], measurement[4], measurement[5]; transf = internal::fromVectorET(aux); Matrix<double, 6, 6> infMatEuler; int idx = 0; for (int r = 0; r < 6; ++r) for (int c = r; c < 6; ++c, ++idx) { assert(idx < (int)information.size()); infMatEuler(r,c) = infMatEuler(c,r) = information[idx]; } // convert information matrix to our internal representation Matrix<double, 6, 6> J; SE3Quat transfAsSe3(transf.matrix().topLeftCorner<3,3>(), transf.translation()); jac_quat3_euler3(J, transfAsSe3); infMat.noalias() = J.transpose() * infMatEuler * J; //cerr << PVAR(transf.matrix()) << endl; //cerr << PVAR(infMat) << endl; } int doInit = 0; SparseOptimizer::Vertex* v1 = _optimizer->vertex(v1Id); SparseOptimizer::Vertex* v2 = _optimizer->vertex(v2Id); if (! v1) { OptimizableGraph::Vertex* v = v1 = addVertex(dimension, v1Id); _verticesAdded.insert(v); doInit = 1; ++_nodesAdded; } if (! 
v2) { OptimizableGraph::Vertex* v = v2 = addVertex(dimension, v2Id); _verticesAdded.insert(v); doInit = 2; ++_nodesAdded; } if (_optimizer->edges().size() == 0) { cerr << "FIRST EDGE "; if (v1->id() < v2->id()) { cerr << "fixing " << v1->id() << endl; v1->setFixed(true); } else { cerr << "fixing " << v2->id() << endl; v2->setFixed(true); } } OnlineEdgeSE3* e = new OnlineEdgeSE3; e->vertices()[0] = v1; e->vertices()[1] = v2; e->setMeasurement(transf); e->setInformation(infMat); _optimizer->addEdge(e); _edgesAdded.insert(e); if (doInit) { OptimizableGraph::Vertex* from = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]); OptimizableGraph::Vertex* to = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]); switch (doInit){ case 1: // initialize v1 from v2 { HyperGraph::VertexSet toSet; toSet.insert(to); if (e->initialEstimatePossible(toSet, from) > 0.) { e->initialEstimate(toSet, from); } break; } case 2: { HyperGraph::VertexSet fromSet; fromSet.insert(from); if (e->initialEstimatePossible(fromSet, to) > 0.) { e->initialEstimate(fromSet, to); } break; } default: cerr << "doInit wrong value\n"; } } } else { cerr << __PRETTY_FUNCTION__ << " not implemented for this dimension" << endl; return false; } if (oldEdgesSize == 0) { _optimizer->jacobianWorkspace().allocate(); } return true; }
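// Illustrative usage sketch (editor's addition, not library code): the "information" vector
// handed to addEdge() packs the upper triangle of the information matrix row by row, matching
// the unpacking loops above. For the 3-DoF (SE2) case that is 6 entries:
// I(0,0), I(0,1), I(0,2), I(1,1), I(1,2), I(2,2). The tag, ids and values below are made up,
// and "slamInterface" is assumed to be an already configured G2oSlamInterface instance.
void exampleAddSe2Edge(G2oSlamInterface& slamInterface)
{
  std::vector<double> measurement;   // dx, dy, dtheta between the two poses
  measurement.push_back(1.0); measurement.push_back(0.0); measurement.push_back(0.05);
  std::vector<double> information;   // upper triangle of the 3x3 information matrix
  information.push_back(500.); information.push_back(0.);  information.push_back(0.);
  information.push_back(500.); information.push_back(0.);  information.push_back(5000.);
  slamInterface.addEdge("EDGE_SE2", /*id=*/0, /*dimension=*/3, /*v1Id=*/0, /*v2Id=*/1, measurement, information);
}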
int main(int argc, char** argv) { OptimizableGraph::initMultiThreading(); int maxIterations; bool verbose; string inputFilename; string gnudump; string outputfilename; string solverProperties; string strSolver; string loadLookup; bool initialGuess; bool initialGuessOdometry; bool marginalize; bool listTypes; bool listSolvers; bool listRobustKernels; bool incremental; bool guiOut; int gaugeId; string robustKernel; bool computeMarginals; bool printSolverProperties; double huberWidth; double gain; int maxIterationsWithGain; //double lambdaInit; int updateGraphEachN = 10; string statsFile; string summaryFile; bool nonSequential; // command line parsing std::vector<int> gaugeList; CommandArgs arg; arg.param("i", maxIterations, 5, "perform n iterations, if negative consider the gain"); arg.param("gain", gain, 1e-6, "the gain used to stop optimization (default = 1e-6)"); arg.param("ig",maxIterationsWithGain, std::numeric_limits<int>::max(), "Maximum number of iterations with gain enabled (default: inf)"); arg.param("v", verbose, false, "verbose output of the optimization process"); arg.param("guess", initialGuess, false, "initial guess based on spanning tree"); arg.param("guessOdometry", initialGuessOdometry, false, "initial guess based on odometry"); arg.param("inc", incremental, false, "run incrementally"); arg.param("update", updateGraphEachN, 10, "updates after x odometry nodes"); arg.param("guiout", guiOut, false, "gui output while running incrementally"); arg.param("marginalize", marginalize, false, "on or off"); arg.param("printSolverProperties", printSolverProperties, false, "print the properties of the solver"); arg.param("solverProperties", solverProperties, "", "set the internal properties of a solver,\n\te.g., initialLambda=0.0001,maxTrialsAfterFailure=2"); arg.param("gnudump", gnudump, "", "dump to gnuplot data file"); arg.param("robustKernel", robustKernel, "", "use this robust error function"); arg.param("robustKernelWidth", huberWidth, -1., "width for the robust Kernel (only if robustKernel)"); arg.param("computeMarginals", computeMarginals, false, "computes the marginal covariances of something. 
FOR TESTING ONLY"); arg.param("gaugeId", gaugeId, -1, "force the gauge"); arg.param("o", outputfilename, "", "output final version of the graph"); arg.param("solver", strSolver, "gn_var", "specify which solver to use underneat\n\t {gn_var, lm_fix3_2, gn_fix6_3, lm_fix7_3}"); #ifndef G2O_DISABLE_DYNAMIC_LOADING_OF_LIBRARIES string dummy; arg.param("solverlib", dummy, "", "specify a solver library which will be loaded"); arg.param("typeslib", dummy, "", "specify a types library which will be loaded"); #endif arg.param("stats", statsFile, "", "specify a file for the statistics"); arg.param("listTypes", listTypes, false, "list the registered types"); arg.param("listRobustKernels", listRobustKernels, false, "list the registered robust kernels"); arg.param("listSolvers", listSolvers, false, "list the available solvers"); arg.param("renameTypes", loadLookup, "", "create a lookup for loading types into other types,\n\t TAG_IN_FILE=INTERNAL_TAG_FOR_TYPE,TAG2=INTERNAL2\n\t e.g., VERTEX_CAM=VERTEX_SE3:EXPMAP"); arg.param("gaugeList", gaugeList, std::vector<int>(), "set the list of gauges separated by commas without spaces \n e.g: 1,2,3,4,5 "); arg.param("summary", summaryFile, "", "append a summary of this optimization run to the summary file passed as argument"); arg.paramLeftOver("graph-input", inputFilename, "", "graph file which will be processed", true); arg.param("nonSequential", nonSequential, false, "apply the robust kernel only on loop closures and not odometries"); arg.parseArgs(argc, argv); if (verbose) { cout << "# Used Compiler: " << G2O_CXX_COMPILER << endl; } #ifndef G2O_DISABLE_DYNAMIC_LOADING_OF_LIBRARIES // registering all the types from the libraries DlWrapper dlTypesWrapper; loadStandardTypes(dlTypesWrapper, argc, argv); // register all the solvers DlWrapper dlSolverWrapper; loadStandardSolver(dlSolverWrapper, argc, argv); #else if (verbose) cout << "# linked version of g2o" << endl; #endif OptimizationAlgorithmFactory* solverFactory = OptimizationAlgorithmFactory::instance(); if (listSolvers) { solverFactory->listSolvers(cout); } if (listTypes) { Factory::instance()->printRegisteredTypes(cout, true); } if (listRobustKernels) { std::vector<std::string> kernels; RobustKernelFactory::instance()->fillKnownKernels(kernels); cout << "Robust Kernels:" << endl; for (size_t i = 0; i < kernels.size(); ++i) { cout << kernels[i] << endl; } } SparseOptimizer optimizer; optimizer.setVerbose(verbose); optimizer.setForceStopFlag(&hasToStop); SparseOptimizerTerminateAction* terminateAction = 0; if (maxIterations < 0) { cerr << "# setup termination criterion based on the gain of the iteration" << endl; maxIterations = maxIterationsWithGain; terminateAction = new SparseOptimizerTerminateAction; terminateAction->setGainThreshold(gain); terminateAction->setMaxIterations(maxIterationsWithGain); optimizer.addPostIterationAction(terminateAction); } // allocating the desired solver + testing whether the solver is okay OptimizationAlgorithmProperty solverProperty; optimizer.setAlgorithm(solverFactory->construct(strSolver, solverProperty)); if (! optimizer.solver()) { cerr << "Error allocating solver. Allocating \"" << strSolver << "\" failed!" << endl; return 0; } if (solverProperties.size() > 0) { bool updateStatus = optimizer.solver()->updatePropertiesFromString(solverProperties); if (! 
updateStatus) { cerr << "Failure while updating the solver properties from the given string" << endl; } } if (solverProperties.size() > 0 || printSolverProperties) { optimizer.solver()->printProperties(cerr); } // Loading the input data if (loadLookup.size() > 0) { optimizer.setRenamedTypesFromString(loadLookup); } if (inputFilename.size() == 0) { cerr << "No input data specified" << endl; return 0; } else if (inputFilename == "-") { cerr << "Read input from stdin" << endl; if (!optimizer.load(cin)) { cerr << "Error loading graph" << endl; return 2; } } else { cerr << "Read input from " << inputFilename << endl; ifstream ifs(inputFilename.c_str()); if (!ifs) { cerr << "Failed to open file" << endl; return 1; } if (!optimizer.load(ifs)) { cerr << "Error loading graph" << endl; return 2; } } cerr << "Loaded " << optimizer.vertices().size() << " vertices" << endl; cerr << "Loaded " << optimizer.edges().size() << " edges" << endl; if (optimizer.vertices().size() == 0) { cerr << "Graph contains no vertices" << endl; return 1; } set<int> vertexDimensions = optimizer.dimensions(); if (! optimizer.isSolverSuitable(solverProperty, vertexDimensions)) { cerr << "The selected solver is not suitable for optimizing the given graph" << endl; return 3; } assert (optimizer.solver()); //optimizer.setMethod(str2method(strMethod)); //optimizer.setUserLambdaInit(lambdaInit); // check for vertices to fix to remove DoF bool gaugeFreedom = optimizer.gaugeFreedom(); OptimizableGraph::Vertex* gauge=0; if (gaugeList.size()){ cerr << "Fixing gauges: "; for (size_t i=0; i<gaugeList.size(); i++){ int id=gaugeList[i]; OptimizableGraph::Vertex* v=optimizer.vertex(id); if (!v){ cerr << "fatal, not found the vertex of id " << id << " in the gaugeList. Aborting"; return -1; } else { if (i==0) gauge = v; cerr << v->id() << " "; v->setFixed(1); } } cerr << endl; gaugeFreedom = false; } else { gauge=optimizer.findGauge(); } if (gaugeFreedom) { if (! gauge) { cerr << "# cannot find a vertex to fix in this thing" << endl; return 2; } else { cerr << "# graph is fixed by node " << gauge->id() << endl; gauge->setFixed(true); } } else { cerr << "# graph is fixed by priors or already fixed vertex" << endl; } // if schur, we wanna marginalize the landmarks... if (marginalize || solverProperty.requiresMarginalize) { int maxDim = *vertexDimensions.rbegin(); int minDim = *vertexDimensions.begin(); if (maxDim != minDim) { cerr << "# Preparing Marginalization of the Landmarks ... "; for (HyperGraph::VertexIDMap::iterator it=optimizer.vertices().begin(); it!=optimizer.vertices().end(); it++){ OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(it->second); if (v->dimension() != maxDim) { v->setMarginalized(true); } } cerr << "done." << endl; } } if (robustKernel.size() > 0) { AbstractRobustKernelCreator* creator = RobustKernelFactory::instance()->creator(robustKernel); cerr << "# Preparing robust error function ... 
"; if (creator) { if (nonSequential) { for (SparseOptimizer::EdgeSet::iterator it = optimizer.edges().begin(); it != optimizer.edges().end(); ++it) { SparseOptimizer::Edge* e = dynamic_cast<SparseOptimizer::Edge*>(*it); if (e->vertices().size() >= 2 && std::abs(e->vertex(0)->id() - e->vertex(1)->id()) != 1) { e->setRobustKernel(creator->construct()); if (huberWidth > 0) e->robustKernel()->setDelta(huberWidth); } } } else { for (SparseOptimizer::EdgeSet::iterator it = optimizer.edges().begin(); it != optimizer.edges().end(); ++it) { SparseOptimizer::Edge* e = dynamic_cast<SparseOptimizer::Edge*>(*it); e->setRobustKernel(creator->construct()); if (huberWidth > 0) e->robustKernel()->setDelta(huberWidth); } } cerr << "done." << endl; } else { cerr << "Unknown Robust Kernel: " << robustKernel << endl; } } // sanity check HyperDijkstra d(&optimizer); UniformCostFunction f; d.shortestPaths(gauge,&f); //cerr << PVAR(d.visited().size()) << endl; if (d.visited().size()!=optimizer.vertices().size()) { cerr << CL_RED("Warning: d.visited().size() != optimizer.vertices().size()") << endl; cerr << "visited: " << d.visited().size() << endl; cerr << "vertices: " << optimizer.vertices().size() << endl; } if (incremental) { cerr << CL_RED("# Note: this variant performs batch steps in each time step") << endl; cerr << CL_RED("# For a variant which updates the Cholesky factor use the binary g2o_incremental") << endl; int incIterations = maxIterations; if (! arg.parsedParam("i")) { cerr << "# Setting default number of iterations" << endl; incIterations = 1; } int updateDisplayEveryN = updateGraphEachN; int maxDim = 0; cerr << "# incremental settings" << endl; cerr << "#\t solve every " << updateGraphEachN << endl; cerr << "#\t iterations " << incIterations << endl; SparseOptimizer::VertexIDMap vertices = optimizer.vertices(); for (SparseOptimizer::VertexIDMap::const_iterator it = vertices.begin(); it != vertices.end(); ++it) { const SparseOptimizer::Vertex* v = static_cast<const SparseOptimizer::Vertex*>(it->second); maxDim = max(maxDim, v->dimension()); } vector<SparseOptimizer::Edge*> edges; for (SparseOptimizer::EdgeSet::iterator it = optimizer.edges().begin(); it != optimizer.edges().end(); ++it) { SparseOptimizer::Edge* e = dynamic_cast<SparseOptimizer::Edge*>(*it); edges.push_back(e); } optimizer.edges().clear(); optimizer.vertices().clear(); optimizer.setVerbose(false); // sort the edges in a way that inserting them makes sense sort(edges.begin(), edges.end(), IncrementalEdgesCompare()); double cumTime = 0.; int vertexCount=0; int lastOptimizedVertexCount = 0; int lastVisUpdateVertexCount = 0; bool freshlyOptimized=false; bool firstRound = true; HyperGraph::VertexSet verticesAdded; HyperGraph::EdgeSet edgesAdded; for (vector<SparseOptimizer::Edge*>::iterator it = edges.begin(); it != edges.end(); ++it) { SparseOptimizer::Edge* e = *it; int doInit = 0; SparseOptimizer::Vertex* v1 = optimizer.vertex(e->vertices()[0]->id()); SparseOptimizer::Vertex* v2 = optimizer.vertex(e->vertices()[1]->id()); if (! v1) { SparseOptimizer::Vertex* v = v1 = dynamic_cast<SparseOptimizer::Vertex*>(e->vertices()[0]); bool v1Added = optimizer.addVertex(v); //cerr << "adding" << v->id() << "(" << v->dimension() << ")" << endl; assert(v1Added); if (! v1Added) cerr << "Error adding vertex " << v->id() << endl; else verticesAdded.insert(v); doInit = 1; if (v->dimension() == maxDim) vertexCount++; } if (! 
v2) { SparseOptimizer::Vertex* v = v2 = dynamic_cast<SparseOptimizer::Vertex*>(e->vertices()[1]); bool v2Added = optimizer.addVertex(v); //cerr << "adding" << v->id() << "(" << v->dimension() << ")" << endl; assert(v2Added); if (! v2Added) cerr << "Error adding vertex " << v->id() << endl; else verticesAdded.insert(v); doInit = 2; if (v->dimension() == maxDim) vertexCount++; } // adding the edge and initialization of the vertices { //cerr << " adding edge " << e->vertices()[0]->id() << " " << e->vertices()[1]->id() << endl; if (! optimizer.addEdge(e)) { cerr << "Unable to add edge " << e->vertices()[0]->id() << " -> " << e->vertices()[1]->id() << endl; } else { edgesAdded.insert(e); } if (doInit) { OptimizableGraph::Vertex* from = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]); OptimizableGraph::Vertex* to = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]); switch (doInit){ case 1: // initialize v1 from v2 { HyperGraph::VertexSet toSet; toSet.insert(to); if (e->initialEstimatePossible(toSet, from) > 0.) { //cerr << "init: " //<< to->id() << "(" << to->dimension() << ") -> " //<< from->id() << "(" << from->dimension() << ") " << endl; e->initialEstimate(toSet, from); } else { assert(0 && "Added unitialized variable to the graph"); } break; } case 2: { HyperGraph::VertexSet fromSet; fromSet.insert(from); if (e->initialEstimatePossible(fromSet, to) > 0.) { //cerr << "init: " //<< from->id() << "(" << from->dimension() << ") -> " //<< to->id() << "(" << to->dimension() << ") " << endl; e->initialEstimate(fromSet, to); } else { assert(0 && "Added unitialized variable to the graph"); } break; } default: cerr << "doInit wrong value\n"; } } } freshlyOptimized=false; { //cerr << "Optimize" << endl; if (vertexCount - lastOptimizedVertexCount >= updateGraphEachN) { if (firstRound) { if (!optimizer.initializeOptimization()){ cerr << "initialization failed" << endl; return 0; } } else { if (! optimizer.updateInitialization(verticesAdded, edgesAdded)) { cerr << "updating initialization failed" << endl; return 0; } } verticesAdded.clear(); edgesAdded.clear(); double ts = get_monotonic_time(); int currentIt=optimizer.optimize(incIterations, !firstRound); double dts = get_monotonic_time() - ts; cumTime += dts; firstRound = false; //optimizer->setOptimizationTime(cumTime); if (verbose) { double chi2 = optimizer.chi2(); cerr << "nodes= " << optimizer.vertices().size() << "\t edges= " << optimizer.edges().size() << "\t chi2= " << chi2 << "\t time= " << dts << "\t iterations= " << currentIt << "\t cumTime= " << cumTime << endl; } lastOptimizedVertexCount = vertexCount; freshlyOptimized = true; if (guiOut) { if (vertexCount - lastVisUpdateVertexCount >= updateDisplayEveryN) { dumpEdges(cout, optimizer); lastVisUpdateVertexCount = vertexCount; } } } if (! verbose) cerr << "."; } } // for all edges if (! 
freshlyOptimized) { double ts = get_monotonic_time(); int currentIt=optimizer.optimize(incIterations, !firstRound); double dts = get_monotonic_time() - ts; cumTime += dts; //optimizer->setOptimizationTime(cumTime); if (verbose) { double chi2 = optimizer.chi2(); cerr << "nodes= " << optimizer.vertices().size() << "\t edges= " << optimizer.edges().size() << "\t chi2= " << chi2 << "\t time= " << dts << "\t iterations= " << currentIt << "\t cumTime= " << cumTime << endl; } } } else { // BATCH optimization if (statsFile!=""){ // allocate buffer for statistics; optimizer.setComputeBatchStatistics(true); } optimizer.initializeOptimization(); optimizer.computeActiveErrors(); double loadChi = optimizer.chi2(); cerr << "Initial chi2 = " << FIXED(loadChi) << endl; if (initialGuess) { optimizer.computeInitialGuess(); } else if (initialGuessOdometry) { EstimatePropagatorCostOdometry costFunction(&optimizer); optimizer.computeInitialGuess(costFunction); } double initChi = optimizer.chi2(); signal(SIGINT, sigquit_handler); int result=optimizer.optimize(maxIterations); if (maxIterations > 0 && result==OptimizationAlgorithm::Fail){ cerr << "Cholesky failed, result might be invalid" << endl; } else if (computeMarginals){ std::vector<std::pair<int, int> > blockIndices; for (size_t i=0; i<optimizer.activeVertices().size(); i++){ OptimizableGraph::Vertex* v=optimizer.activeVertices()[i]; if (v->hessianIndex()>=0){ blockIndices.push_back(make_pair(v->hessianIndex(), v->hessianIndex())); } if (v->hessianIndex()>0){ blockIndices.push_back(make_pair(v->hessianIndex()-1, v->hessianIndex())); } } SparseBlockMatrix<MatrixXd> spinv; if (optimizer.computeMarginals(spinv, blockIndices)) { for (size_t i=0; i<optimizer.activeVertices().size(); i++){ OptimizableGraph::Vertex* v=optimizer.activeVertices()[i]; cerr << "Vertex id:" << v->id() << endl; if (v->hessianIndex()>=0){ cerr << "inv block :" << v->hessianIndex() << ", " << v->hessianIndex()<< endl; cerr << *(spinv.block(v->hessianIndex(), v->hessianIndex())); cerr << endl; } if (v->hessianIndex()>0){ cerr << "inv block :" << v->hessianIndex()-1 << ", " << v->hessianIndex()<< endl; cerr << *(spinv.block(v->hessianIndex()-1, v->hessianIndex())); cerr << endl; } } } } optimizer.computeActiveErrors(); double finalChi=optimizer.chi2(); if (summaryFile!="") { PropertyMap summary; summary.makeProperty<StringProperty>("filename", inputFilename); summary.makeProperty<IntProperty>("n_vertices", optimizer.vertices().size()); summary.makeProperty<IntProperty>("n_edges", optimizer.edges().size()); int nLandmarks=0; int nPoses=0; int maxDim = *vertexDimensions.rbegin(); for (HyperGraph::VertexIDMap::iterator it=optimizer.vertices().begin(); it!=optimizer.vertices().end(); it++){ OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(it->second); if (v->dimension() != maxDim) { nLandmarks++; } else nPoses++; } set<string> edgeTypes; for (HyperGraph::EdgeSet::iterator it=optimizer.edges().begin(); it!=optimizer.edges().end(); it++){ edgeTypes.insert(Factory::instance()->tag(*it)); } stringstream edgeTypesString; for (std::set<string>::iterator it=edgeTypes.begin(); it!=edgeTypes.end(); it++){ edgeTypesString << *it << " "; } summary.makeProperty<IntProperty>("n_poses", nPoses); summary.makeProperty<IntProperty>("n_landmarks", nLandmarks); summary.makeProperty<StringProperty>("edge_types", edgeTypesString.str()); summary.makeProperty<DoubleProperty>("load_chi", loadChi); summary.makeProperty<StringProperty>("solver", strSolver); 
summary.makeProperty<BoolProperty>("robustKernel", robustKernel.size() > 0); summary.makeProperty<DoubleProperty>("init_chi", initChi); summary.makeProperty<DoubleProperty>("final_chi", finalChi); summary.makeProperty<IntProperty>("maxIterations", maxIterations); summary.makeProperty<IntProperty>("realIterations", result); ofstream os; os.open(summaryFile.c_str(), ios::app); summary.writeToCSV(os); } if (statsFile!=""){ cerr << "writing stats to file \"" << statsFile << "\" ... "; ofstream os(statsFile.c_str()); const BatchStatisticsContainer& bsc = optimizer.batchStatistics(); for (int i=0; i<maxIterations; i++) { os << bsc[i] << endl; } cerr << "done." << endl; } } // saving again if (gnudump.size() > 0) { bool gnuPlotStatus = saveGnuplot(gnudump, optimizer); if (! gnuPlotStatus) { cerr << "Error while writing gnuplot files" << endl; } } if (outputfilename.size() > 0) { if (outputfilename == "-") { cerr << "saving to stdout"; optimizer.save(cout); } else { cerr << "saving " << outputfilename << " ... "; optimizer.save(outputfilename.c_str()); } cerr << "done." << endl; } // destroy all the singletons //Factory::destroy(); //OptimizationAlgorithmFactory::destroy(); //HyperGraphActionLibrary::destroy(); return 0; }
bool SparseOptimizer::initializeOptimization(HyperGraph::VertexSet& vset, int level){ if (edges().size() == 0) { cerr << __PRETTY_FUNCTION__ << ": Attempt to initialize an empty graph" << endl; return false; } bool workspaceAllocated = _jacobianWorkspace.allocate(); (void) workspaceAllocated; assert(workspaceAllocated && "Error while allocating memory for the Jacobians"); clearIndexMapping(); _activeVertices.clear(); _activeVertices.reserve(vset.size()); _activeEdges.clear(); set<Edge*> auxEdgeSet; // temporary structure to avoid duplicates for (HyperGraph::VertexSet::iterator it=vset.begin(); it!=vset.end(); ++it){ OptimizableGraph::Vertex* v= (OptimizableGraph::Vertex*) *it; const OptimizableGraph::EdgeSet& vEdges=v->edges(); // count if there are edges in that level. If not remove from the pool int levelEdges=0; for (OptimizableGraph::EdgeSet::const_iterator it=vEdges.begin(); it!=vEdges.end(); ++it){ OptimizableGraph::Edge* e=reinterpret_cast<OptimizableGraph::Edge*>(*it); if (level < 0 || e->level() == level) { bool allVerticesOK = true; for (vector<HyperGraph::Vertex*>::const_iterator vit = e->vertices().begin(); vit != e->vertices().end(); ++vit) { if (vset.find(*vit) == vset.end()) { allVerticesOK = false; break; } } if (allVerticesOK && !e->allVerticesFixed()) { auxEdgeSet.insert(e); levelEdges++; } } } if (levelEdges){ _activeVertices.push_back(v); // test for NANs in the current estimate if we are debugging # ifndef NDEBUG int estimateDim = v->estimateDimension(); if (estimateDim > 0) { Eigen::VectorXd estimateData(estimateDim); if (v->getEstimateData(estimateData.data()) == true) { int k; bool hasNan = arrayHasNaN(estimateData.data(), estimateDim, &k); if (hasNan) cerr << __PRETTY_FUNCTION__ << ": Vertex " << v->id() << " contains a nan entry at index " << k << endl; } } # endif } } _activeEdges.reserve(auxEdgeSet.size()); for (set<Edge*>::iterator it = auxEdgeSet.begin(); it != auxEdgeSet.end(); ++it) _activeEdges.push_back(*it); sortVectorContainers(); return buildIndexMapping(_activeVertices); }
bool SparseOptimizerIncremental::updateInitialization(HyperGraph::VertexSet& vset, HyperGraph::EdgeSet& eset)
{
  if (batchStep) {
    return SparseOptimizerOnline::updateInitialization(vset, eset);
  }

  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    v->clearQuadraticForm(); // be sure that b is zero for this vertex
  }

  // get the touched vertices
  _touchedVertices.clear();
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
    OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
    if (! v1->fixed())
      _touchedVertices.insert(v1);
    if (! v2->fixed())
      _touchedVertices.insert(v2);
  }
  //cerr << PVAR(_touchedVertices.size()) << endl;

  // updating the internal structures
  std::vector<HyperGraph::Vertex*> newVertices;
  newVertices.reserve(vset.size());
  _activeVertices.reserve(_activeVertices.size() + vset.size());
  _activeEdges.reserve(_activeEdges.size() + eset.size());
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it)
    _activeEdges.push_back(static_cast<OptimizableGraph::Edge*>(*it));
  //cerr << "updating internal done." << endl;

  // update the index mapping
  size_t next = _ivMap.size();
  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    if (! v->fixed()) {
      if (! v->marginalized()) {
        v->setHessianIndex(next);
        _ivMap.push_back(v);
        newVertices.push_back(v);
        _activeVertices.push_back(v);
        next++;
      }
      else // not supported right now
        abort();
    }
    else {
      v->setHessianIndex(-1);
    }
  }
  //cerr << "updating index mapping done." << endl;

  // backup the hessianIndex of the touched vertices and prepare the sorting structure
  std::vector<VertexBackup> backupIdx(_touchedVertices.size());
  int idx = 0;
  for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin(); it != _touchedVertices.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    backupIdx[idx].hessianIndex = v->hessianIndex();
    backupIdx[idx].vertex = v;
    backupIdx[idx].hessianData = v->hessianData();
    ++idx;
  }
  sort(backupIdx.begin(), backupIdx.end()); // sort by hessianIndex, which is the same order as used later by the optimizer
  for (int i = 0; i < idx; ++i) {
    backupIdx[i].vertex->setHessianIndex(i);
  }
  //cerr << "backup tempindex done." << endl;

  // building the structure of the update
  _updateMat.clear(true); // get rid of the old matrix structure
  _updateMat.rowBlockIndices().clear();
  _updateMat.colBlockIndices().clear();
  _updateMat.blockCols().clear();

  // placing the current stuff in _updateMat
  MatrixXd* lastBlock = 0;
  int sizePoses = 0;
  for (int i = 0; i < idx; ++i) {
    OptimizableGraph::Vertex* v = backupIdx[i].vertex;
    int dim = v->dimension();
    sizePoses += dim;
    _updateMat.rowBlockIndices().push_back(sizePoses);
    _updateMat.colBlockIndices().push_back(sizePoses);
    _updateMat.blockCols().push_back(SparseBlockMatrix<MatrixXd>::IntBlockMap());
    int ind = v->hessianIndex();
    //cerr << PVAR(ind) << endl;
    if (ind >= 0) {
      MatrixXd* m = _updateMat.block(ind, ind, true);
      v->mapHessianMemory(m->data());
      lastBlock = m;
    }
  }
  if (lastBlock) // guard against an update that touches only fixed vertices
    lastBlock->diagonal().array() += 1e-6; // HACK to keep the eigenvalues positive

  for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertices()[0];
    OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertices()[1];

    int ind1 = v1->hessianIndex();
    if (ind1 == -1)
      continue;
    int ind2 = v2->hessianIndex();
    if (ind2 == -1)
      continue;
    bool transposedBlock = ind1 > ind2;
    if (transposedBlock) // make sure we allocate the upper triangular block
      swap(ind1, ind2);

    MatrixXd* m = _updateMat.block(ind1, ind2, true);
    e->mapHessianMemory(m->data(), 0, 1, transposedBlock);
  }

  // build the system into _updateMat
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    e->computeError();
  }
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    e->linearizeOplus();
  }
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    e->constructQuadraticForm();
  }

  // restore the original data for the vertices
  for (int i = 0; i < idx; ++i) {
    backupIdx[i].vertex->setHessianIndex(backupIdx[i].hessianIndex);
    if (backupIdx[i].hessianData)
      backupIdx[i].vertex->mapHessianMemory(backupIdx[i].hessianData);
  }

  // update the structure of the real block matrix
  bool solverStatus = _algorithm->updateStructure(newVertices, eset);

  bool updateStatus = computeCholeskyUpdate();
  if (! updateStatus) {
    cerr << "Error while computing update" << endl;
  }

  cholmod_sparse* updateAsSparseFactor = cholmod_factor_to_sparse(_cholmodFactor, &_cholmodCommon);

  // convert the CCS update by permuting it back to the permutation of L
  if (updateAsSparseFactor->nzmax > _permutedUpdate->nzmax) {
    //cerr << "realloc _permutedUpdate" << endl;
    cholmod_reallocate_triplet(updateAsSparseFactor->nzmax, _permutedUpdate, &_cholmodCommon);
  }
  _permutedUpdate->nnz = 0;
  _permutedUpdate->nrow = _permutedUpdate->ncol = _L->n;
  {
    int* Ap = (int*)updateAsSparseFactor->p;
    int* Ai = (int*)updateAsSparseFactor->i;
    double* Ax = (double*)updateAsSparseFactor->x;
    int* Bj = (int*)_permutedUpdate->j;
    int* Bi = (int*)_permutedUpdate->i;
    double* Bx = (double*)_permutedUpdate->x;
    for (size_t c = 0; c < updateAsSparseFactor->ncol; ++c) {
      const int& rbeg = Ap[c];
      const int& rend = Ap[c+1];
      int cc = c / slamDimension;
      int coff = c % slamDimension;
      const int& cbase = backupIdx[cc].vertex->colInHessian();
      const int& ccol = _perm(cbase + coff);
      for (int j = rbeg; j < rend; j++) {
        const int& r = Ai[j];
        const double& val = Ax[j];
        int rr = r / slamDimension;
        int roff = r % slamDimension;
        const int& rbase = backupIdx[rr].vertex->colInHessian();
        int row = _perm(rbase + roff);
        int col = ccol;
        if (col > row) // lower triangular entry
          swap(col, row);
        Bi[_permutedUpdate->nnz] = row;
        Bj[_permutedUpdate->nnz] = col;
        Bx[_permutedUpdate->nnz] = val;
        ++_permutedUpdate->nnz;
      }
    }
  }
  cholmod_free_sparse(&updateAsSparseFactor, &_cholmodCommon);

#if 0
  cholmod_sparse* updatePermuted = cholmod_triplet_to_sparse(_permutedUpdate, _permutedUpdate->nnz, &_cholmodCommon);
  //writeCCSMatrix("update-permuted.txt", updatePermuted->nrow, updatePermuted->ncol, (int*)updatePermuted->p, (int*)updatePermuted->i, (double*)updatePermuted->x, false);
  _solverInterface->choleskyUpdate(updatePermuted);
  cholmod_free_sparse(&updatePermuted, &_cholmodCommon);
#else
  convertTripletUpdateToSparse();
  _solverInterface->choleskyUpdate(_permutedUpdateAsSparse);
#endif

  return solverStatus;
}
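// The innermost loop above remaps every scalar entry of the local update factor into the
// global ordering of L: a scalar column c is split into a block index c / slamDimension and
// an offset c % slamDimension, the block index selects the touched vertex via backupIdx, and
// that vertex's colInHessian() plus the offset is pushed through the fill-reducing
// permutation _perm. The standalone sketch below reproduces only that index arithmetic on
// made-up data; dim, toyColInHessian and perm are hypothetical values, not part of g2o.
#include <iostream>
#include <vector>

int main()
{
  const int dim = 3;                                   // block size, e.g. an SE2 pose (x, y, theta)
  std::vector<int> toyColInHessian = {6, 0};           // pretend colInHessian() of two touched vertices
  std::vector<int> perm = {8, 7, 6, 5, 4, 3, 2, 1, 0}; // pretend fill-reducing permutation of L

  // mirror the remapping done in SparseOptimizerIncremental::updateInitialization()
  for (int c = 0; c < 2 * dim; ++c) {
    int cc   = c / dim;                                // which touched vertex (index into backupIdx)
    int coff = c % dim;                                // offset inside that vertex's block
    int globalCol = perm[toyColInHessian[cc] + coff];
    std::cout << "local column " << c << " -> permuted global column " << globalCol << "\n";
  }
  return 0;
}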
bool SolverSLAM2DLinear::solveOrientation()
{
  assert(_optimizer->indexMapping().size() + 1 == _optimizer->vertices().size() && "Needs to operate on full graph");
  assert(_optimizer->vertex(0)->fixed() && "Graph is not fixed by vertex 0");
  VectorXD b, x; // will be used for theta and x/y update
  b.setZero(_optimizer->indexMapping().size());
  x.setZero(_optimizer->indexMapping().size());

  typedef Eigen::Matrix<double, 1, 1, Eigen::ColMajor> ScalarMatrix;

  ScopedArray<int> blockIndices(new int[_optimizer->indexMapping().size()]);
  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i)
    blockIndices[i] = i+1;

  SparseBlockMatrix<ScalarMatrix> H(blockIndices.get(), blockIndices.get(), _optimizer->indexMapping().size(), _optimizer->indexMapping().size());

  // building the structure, diagonal for each active vertex
  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    int poseIdx = v->hessianIndex();
    ScalarMatrix* m = H.block(poseIdx, poseIdx, true);
    m->setZero();
  }

  HyperGraph::VertexSet fixedSet;

  // off diagonal for each edge
  for (SparseOptimizer::EdgeContainer::const_iterator it = _optimizer->activeEdges().begin(); it != _optimizer->activeEdges().end(); ++it) {
#ifndef NDEBUG
    EdgeSE2* e = dynamic_cast<EdgeSE2*>(*it);
    assert(e && "Active edges contain non-odometry edge");
#else
    EdgeSE2* e = static_cast<EdgeSE2*>(*it);
#endif
    OptimizableGraph::Vertex* from = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
    OptimizableGraph::Vertex* to   = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);

    int ind1 = from->hessianIndex();
    int ind2 = to->hessianIndex();
    if (ind1 == -1 || ind2 == -1) {
      if (ind1 == -1) fixedSet.insert(from); // collect the fixed vertices
      if (ind2 == -1) fixedSet.insert(to);
      continue;
    }

    bool transposedBlock = ind1 > ind2;
    if (transposedBlock) { // make sure we allocate the upper triangular block
      std::swap(ind1, ind2);
    }

    ScalarMatrix* m = H.block(ind1, ind2, true);
    m->setZero();
  }

  // walk along the minimal spanning tree to compute the guess for the robot orientation
  assert(fixedSet.size() == 1);
  VertexSE2* root = static_cast<VertexSE2*>(*fixedSet.begin());
  VectorXD thetaGuess;
  thetaGuess.setZero(_optimizer->indexMapping().size());
  UniformCostFunction uniformCost;
  HyperDijkstra hyperDijkstra(_optimizer);
  hyperDijkstra.shortestPaths(root, &uniformCost);

  HyperDijkstra::computeTree(hyperDijkstra.adjacencyMap());
  ThetaTreeAction thetaTreeAction(thetaGuess.data());
  HyperDijkstra::visitAdjacencyMap(hyperDijkstra.adjacencyMap(), &thetaTreeAction);

  // construct the system for the orientation
  for (SparseOptimizer::EdgeContainer::const_iterator it = _optimizer->activeEdges().begin(); it != _optimizer->activeEdges().end(); ++it) {
    EdgeSE2* e = static_cast<EdgeSE2*>(*it);
    VertexSE2* from = static_cast<VertexSE2*>(e->vertices()[0]);
    VertexSE2* to   = static_cast<VertexSE2*>(e->vertices()[1]);
    double omega = e->information()(2,2);

    double fromThetaGuess = from->hessianIndex() < 0 ? 0. : thetaGuess[from->hessianIndex()];
    double toThetaGuess   = to->hessianIndex() < 0 ? 0. : thetaGuess[to->hessianIndex()];
    double error = normalize_theta(-e->measurement().rotation().angle() + toThetaGuess - fromThetaGuess);

    bool fromNotFixed = !(from->fixed());
    bool toNotFixed = !(to->fixed());

    if (fromNotFixed || toNotFixed) {
      double omega_r = - omega * error;
      if (fromNotFixed) {
        b(from->hessianIndex()) -= omega_r;
        (*H.block(from->hessianIndex(), from->hessianIndex()))(0,0) += omega;
        if (toNotFixed) {
          if (from->hessianIndex() > to->hessianIndex())
            (*H.block(to->hessianIndex(), from->hessianIndex()))(0,0) -= omega;
          else
            (*H.block(from->hessianIndex(), to->hessianIndex()))(0,0) -= omega;
        }
      }
      if (toNotFixed) {
        b(to->hessianIndex()) += omega_r;
        (*H.block(to->hessianIndex(), to->hessianIndex()))(0,0) += omega;
      }
    }
  }

  // solve the orientation
  typedef LinearSolverCSparse<ScalarMatrix> SystemSolver;
  SystemSolver linearSystemSolver;
  linearSystemSolver.init();
  bool ok = linearSystemSolver.solve(H, x.data(), b.data());
  if (!ok) {
    cerr << __PRETTY_FUNCTION__ << ": Failure while solving linear system" << endl;
    return false;
  }

  // update the orientation of the 2D poses and set the translation to 0; GN shall solve that
  root->setToOrigin();
  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    VertexSE2* v = static_cast<VertexSE2*>(_optimizer->indexMapping()[i]);
    int poseIdx = v->hessianIndex();
    SE2 poseUpdate(0, 0, normalize_theta(thetaGuess(poseIdx) + x(poseIdx)));
    v->setEstimate(poseUpdate);
  }

  return true;
}
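// The accumulation loop above is the linear orientation estimation step: for every relative
// measurement delta_ij with information omega it adds omega to the two diagonal entries,
// -omega to the (upper-triangular) off-diagonal entry, and +/- omega * normalize_theta(theta_j - theta_i - delta_ij)
// to b, after which a single linear solve corrects the spanning-tree guess. Below is a
// minimal dense sketch of that bookkeeping using Eigen only; the two measurements, the
// guesses and wrapAngle() are made-up stand-ins, not part of SolverSLAM2DLinear.
#include <Eigen/Dense>
#include <cmath>
#include <iostream>

// wrap an angle to (-pi, pi], similar in spirit to g2o's normalize_theta()
static double wrapAngle(double a) { return std::atan2(std::sin(a), std::cos(a)); }

int main()
{
  Eigen::Vector2d thetaGuess(0.1, 0.15);              // spanning-tree guesses of the two free poses
  Eigen::Matrix2d H = Eigen::Matrix2d::Zero();
  Eigen::Vector2d b = Eigen::Vector2d::Zero();

  struct Meas { int from, to; double delta, omega; }; // from = -1 marks the fixed root
  Meas edges[] = { {-1, 0, 0.12, 100.}, {0, 1, 0.03, 100.} };

  for (const Meas& e : edges) {
    double fromGuess = e.from < 0 ? 0. : thetaGuess[e.from];
    double error   = wrapAngle(thetaGuess[e.to] - fromGuess - e.delta);
    double omega_r = -e.omega * error;
    if (e.from >= 0) {                                // "from" is a free vertex
      b(e.from)         -= omega_r;
      H(e.from, e.from) += e.omega;
      H(e.from, e.to)   -= e.omega;                   // fill both halves here; the sparse version
      H(e.to, e.from)   -= e.omega;                   // above stores only the upper triangle
    }
    b(e.to)       += omega_r;                         // "to" is always free in this toy example
    H(e.to, e.to) += e.omega;
  }

  Eigen::Vector2d x = H.ldlt().solve(b);              // corrections to the orientation guess
  std::cout << "corrected thetas: " << (thetaGuess + x).transpose() << "\n"; // expect 0.12 0.15
  return 0;
}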