bool SparseOptimizer::removeVertex(HyperGraph::Vertex* v)
{
  OptimizableGraph::Vertex* vv = static_cast<OptimizableGraph::Vertex*>(v);
  if (vv->hessianIndex() >= 0) {
    // the vertex is part of the current index mapping, which is now stale
    clearIndexMapping();
    _ivMap.clear();
  }
  return HyperGraph::removeVertex(v);
}
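// Usage sketch (illustrative, not part of the original file; the vertex id 42
// and the iteration count are placeholders): after removing a vertex whose
// hessianIndex was valid, the index mapping is stale, so the optimizer has to
// be re-initialized before the next call to optimize().
//
//   g2o::SparseOptimizer optimizer;
//   // ... load the graph, allocate a solver ...
//   optimizer.removeVertex(optimizer.vertex(42)); // invalidates _ivMap
//   optimizer.initializeOptimization();           // rebuilds the index mapping
//   optimizer.optimize(10);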
template <typename Traits>
bool BlockSolver<Traits>::updateStructure(const std::vector<HyperGraph::Vertex*>& vset, const HyperGraph::EdgeSet& edges)
{
  for (std::vector<HyperGraph::Vertex*>::const_iterator vit = vset.begin(); vit != vset.end(); ++vit) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*vit);
    int dim = v->dimension();
    if (! v->marginalized()) {
      v->setColInHessian(_sizePoses);
      _sizePoses += dim;
      _Hpp->rowBlockIndices().push_back(_sizePoses);
      _Hpp->colBlockIndices().push_back(_sizePoses);
      _Hpp->blockCols().push_back(typename SparseBlockMatrix<PoseMatrixType>::IntBlockMap());
      ++_numPoses;
      int ind = v->hessianIndex();
      PoseMatrixType* m = _Hpp->block(ind, ind, true);
      v->mapHessianMemory(m->data());
    } else {
      std::cerr << "updateStructure(): Schur not supported" << std::endl;
      abort();
    }
  }
  resizeVector(_sizePoses + _sizeLandmarks);

  for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);

    for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
      OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertex(viIdx));
      int ind1 = v1->hessianIndex();
      int indexV1Bak = ind1;
      if (ind1 == -1)
        continue;
      for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
        OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertex(vjIdx));
        int ind2 = v2->hessianIndex();
        if (ind2 == -1)
          continue;
        ind1 = indexV1Bak; // restore ind1, it may have been swapped in a previous iteration
        bool transposedBlock = ind1 > ind2;
        if (transposedBlock) // make sure we allocate the upper triangular block
          std::swap(ind1, ind2);
        if (! v1->marginalized() && ! v2->marginalized()) {
          PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
          e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
        } else {
          std::cerr << __PRETTY_FUNCTION__ << ": not supported" << std::endl;
        }
      }
    }
  }
  return true;
}
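// Layout note (a reading of the push_back calls above, not library
// documentation): rowBlockIndices() and colBlockIndices() hold *cumulative*
// dimensions, i.e. one past the last scalar row/column of each block. For
// three pose blocks of sizes 3, 3, 2 they would contain {3, 6, 8}, which is
// why appending a block of dimension dim pushes back the running total
// _sizePoses only after dim has been added to it.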
void SparseOptimizer::computeInitialGuess(EstimatePropagatorCost& costFunction)
{
  OptimizableGraph::VertexSet emptySet;
  std::set<Vertex*> backupVertices;
  HyperGraph::VertexSet fixedVertices; // these are the root nodes where to start the initialization
  for (EdgeContainer::iterator it = _activeEdges.begin(); it != _activeEdges.end(); ++it) {
    OptimizableGraph::Edge* e = *it;
    for (size_t i = 0; i < e->vertices().size(); ++i) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(e->vertex(i));
      if (!v)
        continue;
      if (v->fixed())
        fixedVertices.insert(v);
      else { // check for having a prior which is able to fully initialize a vertex
        for (EdgeSet::const_iterator vedgeIt = v->edges().begin(); vedgeIt != v->edges().end(); ++vedgeIt) {
          OptimizableGraph::Edge* vedge = static_cast<OptimizableGraph::Edge*>(*vedgeIt);
          if (vedge->vertices().size() == 1 && vedge->initialEstimatePossible(emptySet, v) > 0.) {
            vedge->initialEstimate(emptySet, v);
            fixedVertices.insert(v);
          }
        }
      }
      if (v->hessianIndex() == -1) {
        std::set<Vertex*>::const_iterator foundIt = backupVertices.find(v);
        if (foundIt == backupVertices.end()) {
          v->push();
          backupVertices.insert(v);
        }
      }
    }
  }

  EstimatePropagator estimatePropagator(this);
  estimatePropagator.propagate(fixedVertices, costFunction);

  // restoring the vertices that should not be initialized
  for (std::set<Vertex*>::iterator it = backupVertices.begin(); it != backupVertices.end(); ++it) {
    Vertex* v = *it;
    v->pop();
  }

  if (verbose()) {
    computeActiveErrors();
    cerr << "iteration= -1\t chi2= " << activeChi2()
         << "\t time= 0.0"
         << "\t cumTime= 0.0"
         << "\t (using initial guess from " << costFunction.name() << ")" << endl;
  }
}
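// Usage sketch (assumed API, derived only from the signature above; the
// EstimatePropagatorCost constructor argument is an assumption):
//
//   g2o::SparseOptimizer optimizer;
//   // ... build graph, set a solver, initializeOptimization() ...
//   g2o::EstimatePropagatorCost costFunction(&optimizer);
//   optimizer.computeInitialGuess(costFunction); // propagate from fixed roots
//   optimizer.optimize(10);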
bool SparseOptimizerIncremental::updateInitialization(HyperGraph::VertexSet& vset, HyperGraph::EdgeSet& eset)
{
  if (batchStep) {
    return SparseOptimizerOnline::updateInitialization(vset, eset);
  }

  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    v->clearQuadraticForm(); // be sure that b is zero for this vertex
  }

  // get the touched vertices
  _touchedVertices.clear();
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
    OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
    if (! v1->fixed())
      _touchedVertices.insert(v1);
    if (! v2->fixed())
      _touchedVertices.insert(v2);
  }

  // updating the internal structures
  std::vector<HyperGraph::Vertex*> newVertices;
  newVertices.reserve(vset.size());
  _activeVertices.reserve(_activeVertices.size() + vset.size());
  _activeEdges.reserve(_activeEdges.size() + eset.size());
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it)
    _activeEdges.push_back(static_cast<OptimizableGraph::Edge*>(*it));

  // update the index mapping
  size_t next = _ivMap.size();
  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    if (! v->fixed()) {
      if (! v->marginalized()) {
        v->setHessianIndex(next);
        _ivMap.push_back(v);
        newVertices.push_back(v);
        _activeVertices.push_back(v);
        next++;
      } else {
        // not supported right now
        abort();
      }
    } else {
      v->setHessianIndex(-1);
    }
  }

  // backup the tempindex and prepare sorting structure
  // (a value-initialized std::vector replaces the original non-standard
  // variable-length array plus memset)
  std::vector<VertexBackup> backupIdx(_touchedVertices.size());
  int idx = 0;
  for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin(); it != _touchedVertices.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    backupIdx[idx].hessianIndex = v->hessianIndex();
    backupIdx[idx].vertex = v;
    backupIdx[idx].hessianData = v->hessianData();
    ++idx;
  }
  // sort according to the hessianIndex, which is the same order as used later by the optimizer
  sort(backupIdx.begin(), backupIdx.end());
  for (int i = 0; i < idx; ++i) {
    backupIdx[i].vertex->setHessianIndex(i);
  }

  // building the structure of the update
  _updateMat.clear(true); // get rid of the old matrix structure
  _updateMat.rowBlockIndices().clear();
  _updateMat.colBlockIndices().clear();
  _updateMat.blockCols().clear();

  // placing the current stuff in _updateMat
  MatrixXd* lastBlock = 0;
  int sizePoses = 0;
  for (int i = 0; i < idx; ++i) {
    OptimizableGraph::Vertex* v = backupIdx[i].vertex;
    int dim = v->dimension();
    sizePoses += dim;
    _updateMat.rowBlockIndices().push_back(sizePoses);
    _updateMat.colBlockIndices().push_back(sizePoses);
    _updateMat.blockCols().push_back(SparseBlockMatrix<MatrixXd>::IntBlockMap());
    int ind = v->hessianIndex();
    if (ind >= 0) {
      MatrixXd* m = _updateMat.block(ind, ind, true);
      v->mapHessianMemory(m->data());
      lastBlock = m;
    }
  }
  if (lastBlock) // guard against an empty update
    lastBlock->diagonal().array() += 1e-6; // HACK to keep the eigenvalues > 0

  for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
    OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
    int ind1 = v1->hessianIndex();
    if (ind1 == -1)
      continue;
    int ind2 = v2->hessianIndex();
    if (ind2 == -1)
      continue;
    bool transposedBlock = ind1 > ind2;
    if (transposedBlock) // make sure we allocate the upper triangular block
      swap(ind1, ind2);
    MatrixXd* m = _updateMat.block(ind1, ind2, true);
    e->mapHessianMemory(m->data(), 0, 1, transposedBlock);
  }

  // build the system into _updateMat
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    e->computeError();
  }
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    e->linearizeOplus();
  }
  for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
    e->constructQuadraticForm();
  }

  // restore the original data for the vertex
  for (int i = 0; i < idx; ++i) {
    backupIdx[i].vertex->setHessianIndex(backupIdx[i].hessianIndex);
    if (backupIdx[i].hessianData)
      backupIdx[i].vertex->mapHessianMemory(backupIdx[i].hessianData);
  }

  // update the structure of the real block matrix
  bool solverStatus = _algorithm->updateStructure(newVertices, eset);

  bool updateStatus = computeCholeskyUpdate();
  if (! updateStatus) {
    cerr << "Error while computing update" << endl;
  }

  cholmod_sparse* updateAsSparseFactor = cholmod_factor_to_sparse(_cholmodFactor, &_cholmodCommon);

  // convert CCS update by permuting back to the permutation of L
  if (updateAsSparseFactor->nzmax > _permutedUpdate->nzmax) {
    cholmod_reallocate_triplet(updateAsSparseFactor->nzmax, _permutedUpdate, &_cholmodCommon);
  }
  _permutedUpdate->nnz = 0;
  _permutedUpdate->nrow = _permutedUpdate->ncol = _L->n;
  {
    int* Ap = (int*)updateAsSparseFactor->p;
    int* Ai = (int*)updateAsSparseFactor->i;
    double* Ax = (double*)updateAsSparseFactor->x;
    int* Bj = (int*)_permutedUpdate->j;
    int* Bi = (int*)_permutedUpdate->i;
    double* Bx = (double*)_permutedUpdate->x;
    for (size_t c = 0; c < updateAsSparseFactor->ncol; ++c) {
      const int& rbeg = Ap[c];
      const int& rend = Ap[c+1];
      int cc = c / slamDimension;
      int coff = c % slamDimension;
      const int& cbase = backupIdx[cc].vertex->colInHessian();
      const int& ccol = _perm(cbase + coff);
      for (int j = rbeg; j < rend; j++) {
        const int& r = Ai[j];
        const double& val = Ax[j];
        int rr = r / slamDimension;
        int roff = r % slamDimension;
        const int& rbase = backupIdx[rr].vertex->colInHessian();
        int row = _perm(rbase + roff);
        int col = ccol;
        if (col > row) // lower triangular entry
          swap(col, row);
        Bi[_permutedUpdate->nnz] = row;
        Bj[_permutedUpdate->nnz] = col;
        Bx[_permutedUpdate->nnz] = val;
        ++_permutedUpdate->nnz;
      }
    }
  }
  cholmod_free_sparse(&updateAsSparseFactor, &_cholmodCommon);

#if 0
  cholmod_sparse* updatePermuted = cholmod_triplet_to_sparse(_permutedUpdate, _permutedUpdate->nnz, &_cholmodCommon);
  _solverInterface->choleskyUpdate(updatePermuted);
  cholmod_free_sparse(&updatePermuted, &_cholmodCommon);
#else
  convertTripletUpdateToSparse();
  _solverInterface->choleskyUpdate(_permutedUpdateAsSparse);
#endif
  return solverStatus;
}
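// The VertexBackup type used above is not part of this listing. From its usage
// (zero-initializable, sorted by hessianIndex, storing the vertex pointer and
// the previously mapped Hessian memory) it is presumably a plain struct along
// these lines -- an assumption, not the verbatim definition:

struct VertexBackup
{
  int hessianIndex;                  // index before the temporary renumbering
  OptimizableGraph::Vertex* vertex;  // the touched vertex itself
  double* hessianData;               // previously mapped Hessian block memory
  bool operator<(const VertexBackup& other) const
  {
    return hessianIndex < other.hessianIndex; // sort in optimizer order
  }
};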
bool Star::labelStarEdges(int iterations, EdgeLabeler* labeler)
{
  // mark all vertices in the lowLevelEdges as floating
  bool ok = true;
  std::set<OptimizableGraph::Vertex*> vset;
  for (HyperGraph::EdgeSet::iterator it = _lowLevelEdges.begin(); it != _lowLevelEdges.end(); ++it) {
    HyperGraph::Edge* e = *it;
    for (size_t i = 0; i < e->vertices().size(); ++i) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(e->vertices()[i]);
      v->setFixed(false);
      vset.insert(v);
    }
  }
  // backup the current estimates
  for (std::set<OptimizableGraph::Vertex*>::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v = *it;
    v->push();
  }

  // fix all vertices in the gauge
  for (HyperGraph::VertexSet::iterator it = _gauge.begin(); it != _gauge.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    v->setFixed(true);
  }

  if (iterations > 0) {
    _optimizer->initializeOptimization(_lowLevelEdges);
    _optimizer->computeInitialGuess();
    int result = _optimizer->optimize(iterations);
    if (result < 1) {
      cerr << "Vertices num: " << _optimizer->activeVertices().size() << " ids: ";
      for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
        cerr << _optimizer->indexMapping()[i]->id() << " ";
      }
      cerr << endl;
      cerr << "!!! optimization failure" << endl;
      cerr << "star size=" << _lowLevelEdges.size() << endl;
      cerr << "gauge: ";
      for (HyperGraph::VertexSet::iterator it = _gauge.begin(); it != _gauge.end(); ++it) {
        OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
        cerr << "[" << v->id() << " " << v->hessianIndex() << "] ";
      }
      cerr << endl;
      ok = false;
    }
  } else {
    // no optimization requested: just build and fill the linear system
    optimizer()->initializeOptimization(_lowLevelEdges);
    optimizer()->solver()->init();
    OptimizationAlgorithmWithHessian* solverWithHessian =
        dynamic_cast<OptimizationAlgorithmWithHessian*>(optimizer()->solver());
    if (! solverWithHessian || ! solverWithHessian->buildLinearStructure())
      cerr << "FATAL: failure while building linear structure" << endl;
    optimizer()->computeActiveErrors();
    if (solverWithHessian)
      solverWithHessian->updateLinearSystem();
  }

  std::set<OptimizableGraph::Edge*> star;
  for (HyperGraph::EdgeSet::iterator it = _starEdges.begin(); it != _starEdges.end(); ++it) {
    star.insert(static_cast<OptimizableGraph::Edge*>(*it));
  }
  if (ok) {
    int result = labeler->labelEdges(star);
    if (result < 0)
      ok = false;
  }

  // release all vertices and un-fix the gauge
  for (std::set<OptimizableGraph::Vertex*>::iterator it = vset.begin(); it != vset.end(); ++it) {
    OptimizableGraph::Vertex* v = *it;
    v->pop();
  }
  for (HyperGraph::VertexSet::iterator it = _gauge.begin(); it != _gauge.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    v->setFixed(false);
  }
  return ok;
}
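// Note on push()/pop(): g2o vertices keep a stack of estimate backups; push()
// saves the current estimate and pop() restores it. That is why the function
// can freely optimize the star with a temporarily fixed gauge and still hand
// the original estimates back to the caller once the edges have been labeled.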
bool SolverSLAM2DLinear::solveOrientation()
{
  assert(_optimizer->indexMapping().size() + 1 == _optimizer->vertices().size() && "Needs to operate on full graph");
  assert(_optimizer->vertex(0)->fixed() && "Graph is not fixed by vertex 0");
  VectorXD b, x; // will be used for theta and x/y update
  b.setZero(_optimizer->indexMapping().size());
  x.setZero(_optimizer->indexMapping().size());

  typedef Eigen::Matrix<double, 1, 1, Eigen::ColMajor> ScalarMatrix;

  ScopedArray<int> blockIndices(new int[_optimizer->indexMapping().size()]);
  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i)
    blockIndices[i] = i + 1;

  SparseBlockMatrix<ScalarMatrix> H(blockIndices.get(), blockIndices.get(),
      _optimizer->indexMapping().size(), _optimizer->indexMapping().size());

  // building the structure, diagonal for each active vertex
  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    int poseIdx = v->hessianIndex();
    ScalarMatrix* m = H.block(poseIdx, poseIdx, true);
    m->setZero();
  }

  HyperGraph::VertexSet fixedSet;

  // off diagonal for each edge
  for (SparseOptimizer::EdgeContainer::const_iterator it = _optimizer->activeEdges().begin(); it != _optimizer->activeEdges().end(); ++it) {
#ifndef NDEBUG
    EdgeSE2* e = dynamic_cast<EdgeSE2*>(*it);
    assert(e && "Active edges contain non-odometry edge");
#else
    EdgeSE2* e = static_cast<EdgeSE2*>(*it);
#endif
    OptimizableGraph::Vertex* from = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
    OptimizableGraph::Vertex* to   = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
    int ind1 = from->hessianIndex();
    int ind2 = to->hessianIndex();
    if (ind1 == -1 || ind2 == -1) {
      if (ind1 == -1) fixedSet.insert(from); // collect the fixed vertices
      if (ind2 == -1) fixedSet.insert(to);
      continue;
    }
    bool transposedBlock = ind1 > ind2;
    if (transposedBlock) // make sure we allocate the upper triangular block
      std::swap(ind1, ind2);
    ScalarMatrix* m = H.block(ind1, ind2, true);
    m->setZero();
  }

  // walk along the Minimal Spanning Tree to compute the guess for the robot orientation
  assert(fixedSet.size() == 1);
  VertexSE2* root = static_cast<VertexSE2*>(*fixedSet.begin());
  VectorXD thetaGuess;
  thetaGuess.setZero(_optimizer->indexMapping().size());
  UniformCostFunction uniformCost;
  HyperDijkstra hyperDijkstra(_optimizer);
  hyperDijkstra.shortestPaths(root, &uniformCost);
  HyperDijkstra::computeTree(hyperDijkstra.adjacencyMap());
  ThetaTreeAction thetaTreeAction(thetaGuess.data());
  HyperDijkstra::visitAdjacencyMap(hyperDijkstra.adjacencyMap(), &thetaTreeAction);

  // construct the linear system for the orientation
  for (SparseOptimizer::EdgeContainer::const_iterator it = _optimizer->activeEdges().begin(); it != _optimizer->activeEdges().end(); ++it) {
    EdgeSE2* e = static_cast<EdgeSE2*>(*it);
    VertexSE2* from = static_cast<VertexSE2*>(e->vertices()[0]);
    VertexSE2* to   = static_cast<VertexSE2*>(e->vertices()[1]);

    double omega = e->information()(2,2);

    double fromThetaGuess = from->hessianIndex() < 0 ? 0. : thetaGuess[from->hessianIndex()];
    double toThetaGuess   = to->hessianIndex()   < 0 ? 0. : thetaGuess[to->hessianIndex()];
    double error = normalize_theta(-e->measurement().rotation().angle() + toThetaGuess - fromThetaGuess);

    bool fromNotFixed = !(from->fixed());
    bool toNotFixed   = !(to->fixed());

    if (fromNotFixed || toNotFixed) {
      double omega_r = -omega * error;
      if (fromNotFixed) {
        b(from->hessianIndex()) -= omega_r;
        (*H.block(from->hessianIndex(), from->hessianIndex()))(0,0) += omega;
        if (toNotFixed) {
          if (from->hessianIndex() > to->hessianIndex())
            (*H.block(to->hessianIndex(), from->hessianIndex()))(0,0) -= omega;
          else
            (*H.block(from->hessianIndex(), to->hessianIndex()))(0,0) -= omega;
        }
      }
      if (toNotFixed) {
        b(to->hessianIndex()) += omega_r;
        (*H.block(to->hessianIndex(), to->hessianIndex()))(0,0) += omega;
      }
    }
  }

  // solve orientation
  typedef LinearSolverCSparse<ScalarMatrix> SystemSolver;
  SystemSolver linearSystemSolver;
  linearSystemSolver.init();
  bool ok = linearSystemSolver.solve(H, x.data(), b.data());
  if (!ok) {
    cerr << __PRETTY_FUNCTION__ << ": Failure while solving linear system" << endl;
    return false;
  }

  // update the orientation of the 2D poses and set translation to 0, GN shall solve that
  root->setToOrigin();
  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    VertexSE2* v = static_cast<VertexSE2*>(_optimizer->indexMapping()[i]);
    int poseIdx = v->hessianIndex();
    SE2 poseUpdate(0, 0, normalize_theta(thetaGuess(poseIdx) + x(poseIdx)));
    v->setEstimate(poseUpdate);
  }

  return true;
}
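// What the assembly loop above builds, in equation form (a reading of the
// code, not a quote from the library documentation): with one orientation
// variable theta_i per non-fixed pose, a relative angle measurement delta_ij,
// and scalar information omega_ij, it forms the normal equations H * x = b of
//
//   min_theta  sum_{(i,j)}  omega_ij * ( theta_j - theta_i - delta_ij )^2,
//
// linearized at the spanning-tree guess thetaGuess: each edge adds omega to
// the diagonal entries H(i,i) and H(j,j), subtracts omega from the
// off-diagonal entry, and distributes omega * error with opposite signs into
// b(i) and b(j). Solving once for x and adding it to thetaGuess yields the
// orientation estimate; the translations are deliberately left at zero for
// the subsequent Gauss-Newton step.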