void OptimizableGraph::push()
{
  for (OptimizableGraph::VertexIDMap::iterator it=_vertices.begin(); it!=_vertices.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
    v->push();
  }
}
void OptimizableGraph::discardTop(HyperGraph::VertexSet& vset)
{
  for (HyperGraph::VertexSet::iterator it=vset.begin(); it!=vset.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    v->discardTop();
  }
}
void OptimizableGraph::push(HyperGraph::VertexSet& vset) {
  for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end();
      it++) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    v->push();
  }
}
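These push()/pop()/discardTop() helpers maintain a per-vertex stack of estimates, so a caller can back up the current linearization point before a tentative optimization and then either commit or roll back. A minimal usage sketch, assuming a configured SparseOptimizer named optimizer (not part of the original listing):

// Back up all estimates, attempt an optimization, and roll back on failure.
optimizer.push();                  // save the current estimate of every vertex
int iters = optimizer.optimize(10);
if (iters > 0)
  optimizer.discardTop();          // keep the new estimates, drop the backup
else
  optimizer.pop();                 // restore the saved estimates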
  bool SparseOptimizer::buildIndexMapping(SparseOptimizer::VertexContainer& vlist)
  {
    if (! vlist.size())
    {
      _ivMap.clear();
      return false;
    }

    _ivMap.resize(vlist.size());
    size_t i = 0;
    // Walk over all vertices, assigning each one an index.
    // If a vertex is fixed, its index will be -1.
    // Non-fixed vertices receive an incremental index: first the
    // non-marginalizable vertices, then the marginalizable ones.
    // At the end _ivMap contains all non-fixed vertices, with the
    // non-marginalizable ones in the first positions of _ivMap.
    for (int k=0; k<2; k++)
      for (VertexContainer::iterator it=vlist.begin(); it!=vlist.end(); it++)
      {
         OptimizableGraph::Vertex* v = *it;
         if (! v->fixed())
         {
            if (static_cast<int>(v->marginalized()) == k)
            {
               v->setTempIndex(i);
               _ivMap[i]=v;
               i++;
            }
         } else {
            v->setTempIndex(-1);
         }
      }

    _ivMap.resize(i);
    return true;
  }
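After buildIndexMapping() returns, _ivMap holds exactly the non-fixed vertices: the non-marginalizable (pose-like) vertices occupy the front, the marginalizable (landmark-like) vertices follow, and fixed vertices are excluded with index -1. A hedged sketch of how a consumer can exploit this ordering (illustrative only, not from the original listing):

// _ivMap[0..k)   : non-marginalized vertices (e.g. poses)
// _ivMap[k..end) : marginalized vertices (e.g. landmarks)
size_t k = 0;
while (k < _ivMap.size() && !_ivMap[k]->marginalized())
  ++k;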
  OptimizableGraph::Vertex* SparseOptimizer::findGauge(){
    if (vertices().empty())
      return 0;

    int maxDim=0;
    for (HyperGraph::VertexIDMap::iterator
         it  = vertices().begin();
         it != vertices().end();
         it++)
    {
      OptimizableGraph::Vertex* v =
         static_cast<OptimizableGraph::Vertex*>(it->second);
      maxDim = std::max(maxDim,v->dimension());
    }

    OptimizableGraph::Vertex* rut=0;
    for (HyperGraph::VertexIDMap::iterator
         it  = vertices().begin();
         it != vertices().end();
         it++)
    {
      OptimizableGraph::Vertex* v =
         static_cast<OptimizableGraph::Vertex*>(it->second);
      if (v->dimension()==maxDim)
      {
        rut=v;
        break;
      }
    }
    return rut;
  }
  void OptimizableGraph::setFixed(HyperGraph::VertexSet& vset, bool fixed)
{
  for (HyperGraph::VertexSet::iterator it=vset.begin(); it!=vset.end(); ++it) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
    v->setFixed(fixed);
  }
}
Example 7
  bool SparseOptimizer::buildIndexMapping(SparseOptimizer::VertexContainer& vlist){
    if (! vlist.size()){
      _ivMap.clear();
      return false;
    }

    _ivMap.resize(vlist.size());
    size_t i = 0;
    for (int k=0; k<2; k++)
      for (VertexContainer::iterator it=vlist.begin(); it!=vlist.end(); ++it){
      OptimizableGraph::Vertex* v = *it;
      if (! v->fixed()){
        if (static_cast<int>(v->marginalized()) == k){
          v->setHessianIndex(i);
          _ivMap[i]=v;
          i++;
        }
      }
      else {
        v->setHessianIndex(-1);
      }
    }
    _ivMap.resize(i);
    return true;
  }
void OptimizableGraph::discardTop() {
  for (OptimizableGraph::VertexIDMap::iterator it = _vertices.begin();
      it != _vertices.end(); it++) {
    OptimizableGraph::Vertex* v =
        static_cast<OptimizableGraph::Vertex*>(it->second);
    v->discardTop();
  }
}
Example 9
bool edgeAllVertsSameDim(OptimizableGraph::Edge* e, int dim)
{
  for (size_t i = 0; i < e->vertices().size(); ++i) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(e->vertices()[i]);
    if (v->dimension() != dim)
      return false;
  }
  return true;
}
bool G2oSlamInterface::fixNode(const std::vector<int>& nodes)
{
  for (size_t i = 0; i < nodes.size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->vertex(nodes[i]);
    if (v)
      v->setFixed(true);
  }
  return true;
}
Example 11
 bool SparseOptimizer::removeVertex(HyperGraph::Vertex* v)
 {
   OptimizableGraph::Vertex* vv = static_cast<OptimizableGraph::Vertex*>(v);
   if (vv->hessianIndex() >= 0) {
     clearIndexMapping();
     _ivMap.clear();
   }
   return HyperGraph::removeVertex(v);
 }
void BaseMultiEdge<D, E>::linearizeOplus(JacobianWorkspace& jacobianWorkspace)
{
  for (size_t i = 0; i < _vertices.size(); ++i) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(_vertices[i]);
    assert(v->dimension() >= 0);
    new (&_jacobianOplus[i]) JacobianType(jacobianWorkspace.workspaceForVertex(i), D, v->dimension());
  }
  linearizeOplus();
}
  void SparseOptimizer::pop(HyperGraph::VertexSet& vlist)
  {
    for (HyperGraph::VertexSet::iterator it = vlist.begin(); it != vlist.end(); ++it){
      OptimizableGraph::Vertex* v = dynamic_cast<OptimizableGraph::Vertex*> (*it);
      if (v)
        v->pop();
      else
        cerr << "FATAL POP SET" << endl;
    }
  }
 void SparseOptimizer::update(double* update)
 {
   // update the graph by calling oplus on the vertices
   for (size_t i=0; i < _ivMap.size(); ++i)
   {
     OptimizableGraph::Vertex* v = _ivMap[i];
     v->oplus(update);
     update += v->dimension();
   }
 }
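update() assumes the increment vector is stacked in _ivMap order, each vertex consuming dimension() consecutive entries. A hedged sketch of the calling side; the accessor for the solver's solution vector is illustrative here:

// After the linear solver has produced the stacked increment dx,
// apply it to all mapped vertices in one pass: each vertex applies
// its oplus() and the pointer advances by that vertex's dimension.
double* dx = solver->x();   // hypothetical accessor for the solution vector
optimizer->update(dx);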
Example 15
  void SparseOptimizer::push(HyperGraph::VertexSet& vlist)
  {
    for (HyperGraph::VertexSet::iterator it = vlist.begin(); it != vlist.end(); ++it) {
      OptimizableGraph::Vertex* v = dynamic_cast<OptimizableGraph::Vertex*>(*it);
      if (v)
        v->push();
      else
        cerr << __FUNCTION__ << ": FATAL PUSH SET" << endl;
    }
  }
Example 16
 double EstimatePropagatorCostOdometry::operator()(OptimizableGraph::Edge* edge, const OptimizableGraph::VertexSet& from_, OptimizableGraph::Vertex* to_) const
 {
   OptimizableGraph::Edge* e = dynamic_cast<OptimizableGraph::Edge*>(edge);
   OptimizableGraph::Vertex* from = dynamic_cast<OptimizableGraph::Vertex*>(*from_.begin());
   OptimizableGraph::Vertex* to = dynamic_cast<OptimizableGraph::Vertex*>(to_);
   if (std::abs(from->id() - to->id()) != 1) // simple method to identify odometry edges in a pose graph
     return std::numeric_limits<double>::max();
   SparseOptimizer::EdgeContainer::const_iterator it = _graph->findActiveEdge(e);
   if (it == _graph->activeEdges().end()) // it has to be an active edge
     return std::numeric_limits<double>::max();
   return e->initialEstimatePossible(from_, to);
 }
 double OptimizationAlgorithmLevenberg::computeLambdaInit() const
 {
   if (_userLambdaInit->value() > 0)
     return _userLambdaInit->value();
   double maxDiagonal=0.;
   for (size_t k = 0; k < _optimizer->indexMapping().size(); k++) {
     OptimizableGraph::Vertex* v = _optimizer->indexMapping()[k];
     assert(v);
     int dim = v->dimension();
     for (int j = 0; j < dim; ++j){
       maxDiagonal = std::max(fabs(v->hessian(j,j)),maxDiagonal);
     }
   }
   return _tau*maxDiagonal;
 }
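Unless the user supplies a positive value, this is Marquardt's standard initialization: the damping starts proportional to the largest diagonal entry of the Hessian,

  lambda_init = tau * max_i H_ii

so the damped normal equations (H + lambda*I) dx = b begin at a scale matched to the problem. (g2o's default tau is a small constant on the order of 1e-5; stated as background, not taken from this listing.)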
Example 18
void GraphSLAM::addNeighboringVertices(OptimizableGraph::VertexSet& vset, int gap){
  OptimizableGraph::VertexSet temp = vset;
  for (OptimizableGraph::VertexSet::iterator it = temp.begin(); it!=temp.end(); it++){
    OptimizableGraph::Vertex* vertex = (OptimizableGraph::Vertex*) *it;
    for (int i = 1; i <= gap; i++){
      OptimizableGraph::Vertex *v = (OptimizableGraph::Vertex *) _graph->vertex(vertex->id()+i);
      if (v && v->id() != _lastVertex->id()){
        OptimizableGraph::VertexSet::iterator itv = vset.find(v);
        if (itv == vset.end())
          vset.insert(v);
        else
          break;
      }
    }

    for (int i = 1; i <= gap; i++){
      OptimizableGraph::Vertex* v = (OptimizableGraph::Vertex*) _graph->vertex(vertex->id()-i);
      if (v && v->id() != _lastVertex->id()){
        OptimizableGraph::VertexSet::iterator itv = vset.find(v);
        if (itv == vset.end())
          vset.insert(v);
        else
          break;
      }
    }
  }
}
Example 19
bool MainWindow::prepare()
{
  SparseOptimizer* optimizer = viewer->graph;
  if (_currentOptimizationAlgorithmProperty.requiresMarginalize) {
    cerr << "Marginalizing Landmarks" << endl;
    for (SparseOptimizer::VertexIDMap::const_iterator it = optimizer->vertices().begin(); it != optimizer->vertices().end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
      int vdim = v->dimension();
      v->setMarginalized((vdim == _currentOptimizationAlgorithmProperty.landmarkDim));
    }
  }
  else {
    cerr << "Preparing (no marginalization of Landmarks)" << endl;
    for (SparseOptimizer::VertexIDMap::const_iterator it = optimizer->vertices().begin(); it != optimizer->vertices().end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
      v->setMarginalized(false);
    }
  }
  viewer->graph->initializeOptimization();
  return true;
}
void BaseMultiEdge<D, E>::mapHessianMemory(double* d, int i, int j, bool rowMajor)
{
  int idx = internal::computeUpperTriangleIndex(i, j);
  assert(idx < (int)_hessian.size());
  OptimizableGraph::Vertex* vi = static_cast<OptimizableGraph::Vertex*>(HyperGraph::Edge::vertex(i));
  OptimizableGraph::Vertex* vj = static_cast<OptimizableGraph::Vertex*>(HyperGraph::Edge::vertex(j));
  assert(vi->dimension() >= 0);
  assert(vj->dimension() >= 0);
  HessianHelper& h = _hessian[idx];
  if (rowMajor) {
    if (h.matrix.data() != d || h.transposed != rowMajor)
      new (&h.matrix) HessianBlockType(d, vj->dimension(), vi->dimension());
  } else {
    if (h.matrix.data() != d || h.transposed != rowMajor)
      new (&h.matrix) HessianBlockType(d, vi->dimension(), vj->dimension());
  }
  h.transposed = rowMajor;
}
Example 21
bool BlockSolver<Traits>::updateStructure(const std::vector<HyperGraph::Vertex*>& vset, const HyperGraph::EdgeSet& edges)
{
  for (std::vector<HyperGraph::Vertex*>::const_iterator vit = vset.begin(); vit != vset.end(); ++vit) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*vit);
    int dim = v->dimension();
    if (! v->marginalized()){
      v->setColInHessian(_sizePoses);
      _sizePoses+=dim;
      _Hpp->rowBlockIndices().push_back(_sizePoses);
      _Hpp->colBlockIndices().push_back(_sizePoses);
      _Hpp->blockCols().push_back(typename SparseBlockMatrix<PoseMatrixType>::IntBlockMap());
      ++_numPoses;
      int ind = v->hessianIndex();
      PoseMatrixType* m = _Hpp->block(ind, ind, true);
      v->mapHessianMemory(m->data());
    } else {
      std::cerr << "updateStructure(): Schur not supported" << std::endl;
      abort();
    }
  }
  resizeVector(_sizePoses + _sizeLandmarks);

  for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);

    for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
      OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertex(viIdx);
      int ind1 = v1->hessianIndex();
      int indexV1Bak = ind1;
      if (ind1 == -1)
        continue;
      for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
        OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertex(vjIdx);
        int ind2 = v2->hessianIndex();
        if (ind2 == -1)
          continue;
        ind1 = indexV1Bak;
        bool transposedBlock = ind1 > ind2;
        if (transposedBlock) // make sure, we allocate the upper triangular block
          std::swap(ind1, ind2);

        if (! v1->marginalized() && !v2->marginalized()) {
          PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
          e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
        } else { 
          std::cerr << __PRETTY_FUNCTION__ << ": not supported" << std::endl;
        }
      }
    }

  }

  return true;
}
Example 22
  void SparseOptimizer::computeInitialGuess(EstimatePropagatorCost& costFunction)
  {
    OptimizableGraph::VertexSet emptySet;
    std::set<Vertex*> backupVertices;
    HyperGraph::VertexSet fixedVertices; // these are the root nodes where to start the initialization
    for (EdgeContainer::iterator it = _activeEdges.begin(); it != _activeEdges.end(); ++it) {
      OptimizableGraph::Edge* e = *it;
      for (size_t i = 0; i < e->vertices().size(); ++i) {
        OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(e->vertex(i));
        if (!v)
          continue;
        if (v->fixed())
          fixedVertices.insert(v);
        else { // check for having a prior which is able to fully initialize a vertex
          for (EdgeSet::const_iterator vedgeIt = v->edges().begin(); vedgeIt != v->edges().end(); ++vedgeIt) {
            OptimizableGraph::Edge* vedge = static_cast<OptimizableGraph::Edge*>(*vedgeIt);
            if (vedge->vertices().size() == 1 && vedge->initialEstimatePossible(emptySet, v) > 0.) {
              //cerr << "Initialize with prior for " << v->id() << endl;
              vedge->initialEstimate(emptySet, v);
              fixedVertices.insert(v);
            }
          }
        }
        if (v->hessianIndex() == -1) {
          std::set<Vertex*>::const_iterator foundIt = backupVertices.find(v);
          if (foundIt == backupVertices.end()) {
            v->push();
            backupVertices.insert(v);
          }
        }
      }
    }

    EstimatePropagator estimatePropagator(this);
    estimatePropagator.propagate(fixedVertices, costFunction);

    // restoring the vertices that should not be initialized
    for (std::set<Vertex*>::iterator it = backupVertices.begin(); it != backupVertices.end(); ++it) {
      Vertex* v = *it;
      v->pop();
    }
    if (verbose()) {
      computeActiveErrors();
      cerr << "iteration= -1\t chi2= " << activeChi2()
          << "\t time= 0.0"
          << "\t cumTime= 0.0"
          << "\t (using initial guess from " << costFunction.name() << ")" << endl;
    }
  }
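A hedged sketch of how computeInitialGuess() is typically driven (the surrounding calls are illustrative): the propagator spreads estimates outward from the fixed or prior-initialized vertices before the first iteration.

optimizer.initializeOptimization();               // sets up the _activeEdges used above
g2o::EstimatePropagatorCost costFunction(&optimizer);
optimizer.computeInitialGuess(costFunction);      // propagate from the fixed vertices
optimizer.optimize(10);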
  bool SparseOptimizer::gaugeFreedom()
  {
    if (vertices().empty())
      return false;

    int maxDim=0;
    for (HyperGraph::VertexIDMap::iterator
         it  = vertices().begin();
         it != vertices().end();
         it++)
    {
      OptimizableGraph::Vertex* v =
         static_cast<OptimizableGraph::Vertex*>(it->second);
      maxDim = std::max(maxDim,v->dimension());
    }

    for (HyperGraph::VertexIDMap::iterator
         it  = vertices().begin();
         it != vertices().end();
         it++)
    {
      OptimizableGraph::Vertex* v =
         static_cast<OptimizableGraph::Vertex*>(it->second);
      if (v->dimension() == maxDim)
      {
        // test for full dimension prior
        for (HyperGraph::EdgeSet::const_iterator
             eit  = v->edges().begin();
             eit != v->edges().end();
             ++eit)
        {
          OptimizableGraph::Edge* e =
             static_cast<OptimizableGraph::Edge*>(*eit);
          if (e->vertices().size() == 1 && e->dimension() == maxDim)
            return false;
        }
      }
    }
    return true;
  }
  bool SparseOptimizerIncremental::updateInitialization(HyperGraph::VertexSet& vset, HyperGraph::EdgeSet& eset)
  {
    if (batchStep) {
      return SparseOptimizerOnline::updateInitialization(vset, eset);
    }

    for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
      v->clearQuadraticForm(); // be sure that b is zero for this vertex
    }

    // get the touched vertices
    _touchedVertices.clear();
    for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
      OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
      if (! v1->fixed())
        _touchedVertices.insert(v1);
      if (! v2->fixed())
        _touchedVertices.insert(v2);
    }
    //cerr << PVAR(_touchedVertices.size()) << endl;

    // updating the internal structures
    std::vector<HyperGraph::Vertex*> newVertices;
    newVertices.reserve(vset.size());
    _activeVertices.reserve(_activeVertices.size() + vset.size());
    _activeEdges.reserve(_activeEdges.size() + eset.size());
    for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it)
      _activeEdges.push_back(static_cast<OptimizableGraph::Edge*>(*it));
    //cerr << "updating internal done." << endl;

    // update the index mapping
    size_t next = _ivMap.size();
    for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
      OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(*it);
      if (! v->fixed()){
        if (! v->marginalized()){
          v->setHessianIndex(next);
          _ivMap.push_back(v);
          newVertices.push_back(v);
          _activeVertices.push_back(v);
          next++;
        } 
        else // not supported right now
          abort();
      }
      else {
        v->setHessianIndex(-1);
      }
    }
    //cerr << "updating index mapping done." << endl;

    // backup the tempindex and prepare sorting structure
    VertexBackup backupIdx[_touchedVertices.size()];
    memset(backupIdx, 0, sizeof(VertexBackup) * _touchedVertices.size());
    int idx = 0;
    for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin(); it != _touchedVertices.end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
      backupIdx[idx].hessianIndex = v->hessianIndex();
      backupIdx[idx].vertex = v;
      backupIdx[idx].hessianData = v->hessianData();
      ++idx;
    }
    sort(backupIdx, backupIdx + _touchedVertices.size()); // sort according to the hessianIndex which is the same order as used later by the optimizer
    for (int i = 0; i < idx; ++i) {
      backupIdx[i].vertex->setHessianIndex(i);
    }
    //cerr << "backup tempindex done." << endl;

    // building the structure of the update
    _updateMat.clear(true); // get rid of the old matrix structure
    _updateMat.rowBlockIndices().clear();
    _updateMat.colBlockIndices().clear();
    _updateMat.blockCols().clear();

    // placing the current stuff in _updateMat
    MatrixXd* lastBlock = 0;
    int sizePoses = 0;
    for (int i = 0; i < idx; ++i) {
      OptimizableGraph::Vertex* v = backupIdx[i].vertex;
      int dim = v->dimension();
      sizePoses+=dim;
      _updateMat.rowBlockIndices().push_back(sizePoses);
      _updateMat.colBlockIndices().push_back(sizePoses);
      _updateMat.blockCols().push_back(SparseBlockMatrix<MatrixXd>::IntBlockMap());
      int ind = v->hessianIndex();
      //cerr << PVAR(ind) << endl;
      if (ind >= 0) {
        MatrixXd* m = _updateMat.block(ind, ind, true);
        v->mapHessianMemory(m->data());
        lastBlock = m;
      }
    }
    lastBlock->diagonal().array() += 1e-6; // HACK to get Eigen value > 0


    for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertices()[0];
      OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertices()[1];

      int ind1 = v1->hessianIndex();
      if (ind1 == -1)
        continue;
      int ind2 = v2->hessianIndex();
      if (ind2 == -1)
        continue;
      bool transposedBlock = ind1 > ind2;
      if (transposedBlock) // make sure, we allocate the upper triangular block
        swap(ind1, ind2);

      MatrixXd* m = _updateMat.block(ind1, ind2, true);
      e->mapHessianMemory(m->data(), 0, 1, transposedBlock);
    }

    // build the system into _updateMat
    for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
      OptimizableGraph::Edge * e = static_cast<OptimizableGraph::Edge*>(*it);
      e->computeError();
    }
    for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      e->linearizeOplus();
    }
    for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      e->constructQuadraticForm();
    }

    // restore the original data for the vertex
    for (int i = 0; i < idx; ++i) {
      backupIdx[i].vertex->setHessianIndex(backupIdx[i].hessianIndex);
      if (backupIdx[i].hessianData)
        backupIdx[i].vertex->mapHessianMemory(backupIdx[i].hessianData);
    }

    // update the structure of the real block matrix
    bool solverStatus = _algorithm->updateStructure(newVertices, eset);

    bool updateStatus = computeCholeskyUpdate();
    if (! updateStatus) {
      cerr << "Error while computing update" << endl;
    }

    cholmod_sparse* updateAsSparseFactor = cholmod_factor_to_sparse(_cholmodFactor, &_cholmodCommon);

    // convert CCS update by permuting back to the permutation of L
    if (updateAsSparseFactor->nzmax > _permutedUpdate->nzmax) {
      //cerr << "realloc _permutedUpdate" << endl;
      cholmod_reallocate_triplet(updateAsSparseFactor->nzmax, _permutedUpdate, &_cholmodCommon);
    }
    _permutedUpdate->nnz = 0;
    _permutedUpdate->nrow = _permutedUpdate->ncol = _L->n;
    {
      int* Ap = (int*)updateAsSparseFactor->p;
      int* Ai = (int*)updateAsSparseFactor->i;
      double* Ax = (double*)updateAsSparseFactor->x;
      int* Bj = (int*)_permutedUpdate->j;
      int* Bi = (int*)_permutedUpdate->i;
      double* Bx = (double*)_permutedUpdate->x;
      for (size_t c = 0; c < updateAsSparseFactor->ncol; ++c) {
        const int& rbeg = Ap[c];
        const int& rend = Ap[c+1];
        int cc = c / slamDimension;
        int coff = c % slamDimension;
        const int& cbase = backupIdx[cc].vertex->colInHessian();
        const int& ccol = _perm(cbase + coff);
        for (int j = rbeg; j < rend; j++) {
          const int& r = Ai[j];
          const double& val = Ax[j];

          int rr = r / slamDimension;
          int roff = r % slamDimension;
          const int& rbase = backupIdx[rr].vertex->colInHessian();
          
          int row = _perm(rbase + roff);
          int col = ccol;
          if (col > row) // lower triangular entry
            swap(col, row);
          Bi[_permutedUpdate->nnz] = row;
          Bj[_permutedUpdate->nnz] = col;
          Bx[_permutedUpdate->nnz] = val;
          ++_permutedUpdate->nnz;
        }
      }
    }
    cholmod_free_sparse(&updateAsSparseFactor, &_cholmodCommon);

#if 0
    cholmod_sparse* updatePermuted = cholmod_triplet_to_sparse(_permutedUpdate, _permutedUpdate->nnz, &_cholmodCommon);
    //writeCCSMatrix("update-permuted.txt", updatePermuted->nrow, updatePermuted->ncol, (int*)updatePermuted->p, (int*)updatePermuted->i, (double*)updatePermuted->x, false);
    _solverInterface->choleskyUpdate(updatePermuted);
    cholmod_free_sparse(&updatePermuted, &_cholmodCommon);
#else
    convertTripletUpdateToSparse();
    _solverInterface->choleskyUpdate(_permutedUpdateAsSparse);
#endif

    return solverStatus;
  }
  int SparseOptimizerIncremental::optimize(int iterations, bool online)
  {
    (void) iterations; // we only do one iteration anyhow
    OptimizationAlgorithm* solver = _algorithm;
    solver->init(online);

    int cjIterations=0;
    bool ok=true;

    if (! online || batchStep) {
      //cerr << "performing batch step" << endl;
      if (! online) {
        ok = _underlyingSolver->buildStructure();
        if (! ok) {
          cerr << __PRETTY_FUNCTION__ << ": Failure while building CCS structure" << endl;
          return 0;
        }
      }

      // copy over the updated estimate as new linearization point
      if (slamDimension == 3) {
        for (size_t i = 0; i < indexMapping().size(); ++i) {
          OnlineVertexSE2* v = static_cast<OnlineVertexSE2*>(indexMapping()[i]);
          v->setEstimate(v->updatedEstimate);
        }
      }
      else if (slamDimension == 6) {
        for (size_t i = 0; i < indexMapping().size(); ++i) {
          OnlineVertexSE3* v= static_cast<OnlineVertexSE3*>(indexMapping()[i]);
          v->setEstimate(v->updatedEstimate);
        }
      }

      SparseOptimizer::computeActiveErrors();
      SparseOptimizer::linearizeSystem();
      _underlyingSolver->buildSystem();

      int numBlocksRequired = _ivMap.size();
      if (_cmember.size() < numBlocksRequired) {
        _cmember.resize(2 * numBlocksRequired);
      }
      memset(_cmember.data(), 0, numBlocksRequired * sizeof(int));
      if (_ivMap.size() > 100) {
        for (size_t i = _ivMap.size() - 20; i < _ivMap.size(); ++i) {
          const HyperGraph::EdgeSet& eset = _ivMap[i]->edges();
          for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end(); ++it) {
            OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
            OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
            OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
            if (v1->hessianIndex() >= 0)
              _cmember(v1->hessianIndex()) = 1;
            if (v2->hessianIndex() >= 0)
              _cmember(v2->hessianIndex()) = 1;
          }
        }
      }

      ok = _underlyingSolver->solve();

      // get the current cholesky factor along with the permutation
      _L = _solverInterface->L();
      if (_perm.size() < (int)_L->n)
        _perm.resize(2*_L->n);
      int* p = (int*)_L->Perm;
      for (size_t i = 0; i < _L->n; ++i)
        _perm[p[i]] = i;

    }
    else {
      // update the b vector
      for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin(); it != _touchedVertices.end(); ++it) {
        OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
        int iBase = v->colInHessian();
        v->copyB(_underlyingSolver->b() + iBase);
      }
      _solverInterface->solve(_underlyingSolver->x(), _underlyingSolver->b());
    }

    update(_underlyingSolver->x());
    ++cjIterations; 

    if (verbose()){
      computeActiveErrors();
      cerr
        << "nodes = " << vertices().size()
        << "\t edges= " << _activeEdges.size()
        << "\t chi2= " << FIXED(activeChi2())
        << endl;
    }

    if (vizWithGnuplot)
      gnuplotVisualization();

    if (! ok)
      return 0;
    return 1;
  }
Example 26
bool OptimizableGraph::load(istream& is, bool createEdges)
{
  // scan for the parameters in the whole file
  if (!_parameters.read(is,&_renamedTypesLookup))
    return false;
#ifndef NDEBUG
  cerr << "Loaded " << _parameters.size() << " parameters" << endl;
#endif
  is.clear();
  is.seekg(ios_base::beg);
  set<string> warnedUnknownTypes;
  stringstream currentLine;
  string token;

  Factory* factory = Factory::instance();
  HyperGraph::GraphElemBitset elemBitset;
  elemBitset[HyperGraph::HGET_PARAMETER] = 1;
  elemBitset.flip();

  Vertex* previousVertex = 0;
  Data* previousData = 0;

  while (1) {
    int bytesRead = readLine(is, currentLine);
    if (bytesRead == -1)
      break;
    currentLine >> token;
    //cerr << "Token=" << token << endl;
    if (bytesRead == 0 || token.size() == 0 || token[0] == '#')
      continue;

    // handle commands encoded in the file
    bool handledCommand = false;
    
    if (token == "FIX") {
      handledCommand = true;
      int id;
      while (currentLine >> id) {
        OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(vertex(id));
        if (v) {
#        ifndef NDEBUG
          cerr << "Fixing vertex " << v->id() << endl;
#        endif
          v->setFixed(true);
        } else {
          cerr << "Warning: Unable to fix vertex with id " << id << ". Not found in the graph." << endl;
        }
      }
    }

    if (handledCommand)
      continue;
     
    // do the mapping to an internal type if it matches
    if (_renamedTypesLookup.size() > 0) {
      map<string, string>::const_iterator foundIt = _renamedTypesLookup.find(token);
      if (foundIt != _renamedTypesLookup.end()) {
        token = foundIt->second;
      }
    }

    if (! factory->knowsTag(token)) {
      if (warnedUnknownTypes.count(token) != 1) {
        warnedUnknownTypes.insert(token);
        cerr << CL_RED(__PRETTY_FUNCTION__ << " unknown type: " << token) << endl;
      }
      continue;
    }

    HyperGraph::HyperGraphElement* element = factory->construct(token, elemBitset);
    if (dynamic_cast<Vertex*>(element)) { // it's a vertex type
      //cerr << "it is a vertex" << endl;
      previousData = 0;
      Vertex* v = static_cast<Vertex*>(element);
      int id;
      currentLine >> id;
      bool r = v->read(currentLine);
      if (! r)
        cerr << __PRETTY_FUNCTION__ << ": Error reading vertex " << token << " " << id << endl;
      v->setId(id);
      if (!addVertex(v)) {
        cerr << __PRETTY_FUNCTION__ << ": Failure adding Vertex, " << token << " " << id << endl;
        delete v;
      } else {
        previousVertex = v;
      }
    }
Example 27
bool BlockSolver<Traits>::buildStructure(bool zeroBlocks)
{
  assert(_optimizer);

  size_t sparseDim = 0;
  _numPoses=0;
  _numLandmarks=0;
  _sizePoses=0;
  _sizeLandmarks=0;
  int* blockPoseIndices = new int[_optimizer->indexMapping().size()];
  int* blockLandmarkIndices = new int[_optimizer->indexMapping().size()];

  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    int dim = v->dimension();
    if (! v->marginalized()){
      v->setColInHessian(_sizePoses);
      _sizePoses+=dim;
      blockPoseIndices[_numPoses]=_sizePoses;
      ++_numPoses;
    } else {
      v->setColInHessian(_sizeLandmarks);
      _sizeLandmarks+=dim;
      blockLandmarkIndices[_numLandmarks]=_sizeLandmarks;
      ++_numLandmarks;
    }
    sparseDim += dim;
  }
  resize(blockPoseIndices, _numPoses, blockLandmarkIndices, _numLandmarks, sparseDim);
  delete[] blockLandmarkIndices;
  delete[] blockPoseIndices;

  // allocate the diagonal on Hpp and Hll
  int poseIdx = 0;
  int landmarkIdx = 0;
  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    if (! v->marginalized()){
      //assert(poseIdx == v->hessianIndex());
      PoseMatrixType* m = _Hpp->block(poseIdx, poseIdx, true);
      if (zeroBlocks)
        m->setZero();
      v->mapHessianMemory(m->data());
      ++poseIdx;
    } else {
      LandmarkMatrixType* m = _Hll->block(landmarkIdx, landmarkIdx, true);
      if (zeroBlocks)
        m->setZero();
      v->mapHessianMemory(m->data());
      ++landmarkIdx;
    }
  }
  assert(poseIdx == _numPoses && landmarkIdx == _numLandmarks);

  // temporary structures for building the pattern of the Schur complement
  SparseBlockMatrixHashMap<PoseMatrixType>* schurMatrixLookup = 0;
  if (_doSchur) {
    schurMatrixLookup = new SparseBlockMatrixHashMap<PoseMatrixType>(_Hschur->rowBlockIndices(), _Hschur->colBlockIndices());
    schurMatrixLookup->blockCols().resize(_Hschur->blockCols().size());
  }

  // here we assume that the landmark indices start after the pose ones
  // create the structure in Hpp, Hll and in Hpl
  for (SparseOptimizer::EdgeContainer::const_iterator it=_optimizer->activeEdges().begin(); it!=_optimizer->activeEdges().end(); ++it){
    OptimizableGraph::Edge* e = *it;

    for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
      OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertex(viIdx);
      int ind1 = v1->hessianIndex();
      if (ind1 == -1)
        continue;
      int indexV1Bak = ind1;
      for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
        OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertex(vjIdx);
        int ind2 = v2->hessianIndex();
        if (ind2 == -1)
          continue;
        ind1 = indexV1Bak;
        bool transposedBlock = ind1 > ind2;
        if (transposedBlock){ // make sure, we allocate the upper triangle block
          std::swap(ind1, ind2);
        }
        if (! v1->marginalized() && !v2->marginalized()){
          PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
          if (zeroBlocks)
            m->setZero();
          e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
          if (_Hschur) {// assume this is only needed in case we solve with the schur complement
            schurMatrixLookup->addBlock(ind1, ind2);
          }
        } else if (v1->marginalized() && v2->marginalized()){
          // RAINER hmm.... should we ever reach this here????
          LandmarkMatrixType* m = _Hll->block(ind1-_numPoses, ind2-_numPoses, true);
          if (zeroBlocks)
            m->setZero();
          e->mapHessianMemory(m->data(), viIdx, vjIdx, false);
        } else { 
          if (v1->marginalized()){ 
            PoseLandmarkMatrixType* m = _Hpl->block(v2->hessianIndex(),v1->hessianIndex()-_numPoses, true);
            if (zeroBlocks)
              m->setZero();
            e->mapHessianMemory(m->data(), viIdx, vjIdx, true); // transpose the block before writing to it
          } else {
            PoseLandmarkMatrixType* m = _Hpl->block(v1->hessianIndex(),v2->hessianIndex()-_numPoses, true);
            if (zeroBlocks)
              m->setZero();
            e->mapHessianMemory(m->data(), viIdx, vjIdx, false); // directly the block
          }
        }
      }
    }
  }

  if (! _doSchur)
    return true;

  _DInvSchur->diagonal().resize(landmarkIdx);
  _Hpl->fillSparseBlockMatrixCCS(*_HplCCS);

  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    if (v->marginalized()){
      const HyperGraph::EdgeSet& vedges=v->edges();
      for (HyperGraph::EdgeSet::const_iterator it1=vedges.begin(); it1!=vedges.end(); ++it1){
        for (size_t i=0; i<(*it1)->vertices().size(); ++i)
        {
          OptimizableGraph::Vertex* v1= (OptimizableGraph::Vertex*) (*it1)->vertex(i);
          if (v1->hessianIndex()==-1 || v1==v)
            continue;
          for  (HyperGraph::EdgeSet::const_iterator it2=vedges.begin(); it2!=vedges.end(); ++it2){
            for (size_t j=0; j<(*it2)->vertices().size(); ++j)
            {
              OptimizableGraph::Vertex* v2= (OptimizableGraph::Vertex*) (*it2)->vertex(j);
              if (v2->hessianIndex()==-1 || v2==v)
                continue;
              int i1=v1->hessianIndex();
              int i2=v2->hessianIndex();
              if (i1<=i2) {
                schurMatrixLookup->addBlock(i1, i2);
              }
            }
          }
        }
      }
    }
  }

  _Hschur->takePatternFromHash(*schurMatrixLookup);
  delete schurMatrixLookup;
  _Hschur->fillSparseBlockMatrixCCSTransposed(*_HschurTransposedCCS);

  return true;
}
Example 28
void BaseMultiEdge<D, E>::constructQuadraticForm()
{
  const InformationType& omega = _information;
  Matrix<double, D, 1> omega_r = - omega * _error;

  for (size_t i = 0; i < _vertices.size(); ++i) {
    OptimizableGraph::Vertex* from = static_cast<OptimizableGraph::Vertex*>(_vertices[i]);
    bool istatus = !(from->fixed());

    if (istatus) {
      const MatrixXd& A = _jacobianOplus[i];

      MatrixXd AtO = A.transpose() * omega;
      int fromDim = from->dimension();
      Map<MatrixXd> fromMap(from->hessianData(), fromDim, fromDim);
      Map<VectorXd> fromB(from->bData(), fromDim);

      // ii block in the hessian
#ifdef G2O_OPENMP
      from->lockQuadraticForm();
#endif
      fromMap.noalias() += AtO * A;
      fromB.noalias() += A.transpose() * omega_r;

      // compute the off-diagonal blocks ij for all j
      for (size_t j = i+1; j < _vertices.size(); ++j) {
        OptimizableGraph::Vertex* to = static_cast<OptimizableGraph::Vertex*>(_vertices[j]);
#ifdef G2O_OPENMP
        to->lockQuadraticForm();
#endif
        bool jstatus = !(to->fixed());
        if (jstatus) {
          const MatrixXd& B = _jacobianOplus[j];
          int idx = computeUpperTriangleIndex(i, j);
          assert(idx < (int)_hessian.size());
          HessianHelper& hhelper = _hessian[idx];
          if (hhelper.transposed) { // we have to write to the block as transposed
            hhelper.matrix.noalias() += B.transpose() * AtO.transpose();
          } else {
            hhelper.matrix.noalias() += AtO * B;
          }
        }
#ifdef G2O_OPENMP
        to->unlockQuadraticForm();
#endif
      }

#ifdef G2O_OPENMP
      from->unlockQuadraticForm();
#endif
    }

  }

}
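With A = d(e)/d(x_i) and B = d(e)/d(x_j), the accumulation above is the usual Gauss-Newton quadratic form:

  H_ii += A^T * Omega * A
  b_i  += A^T * Omega * (-e)
  H_ij += A^T * Omega * B

When hhelper.transposed is set, the block is stored as its lower-triangle counterpart, so (A^T * Omega * B)^T = B^T * Omega * A is accumulated instead; since Omega is symmetric this is exactly the B.transpose() * AtO.transpose() branch.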
Example 29
void BaseMultiEdge<D, E>::linearizeOplus()
{
#ifdef G2O_OPENMP
  for (size_t i = 0; i < _vertices.size(); ++i) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(_vertices[i]);
    v->lockQuadraticForm();
  }
#endif

  const double delta = 1e-9;
  const double scalar = 1.0 / (2*delta);
  ErrorVector errorBak;
  ErrorVector errorBeforeNumeric = _error;

  for (size_t i = 0; i < _vertices.size(); ++i) {
    //Xi - estimate the jacobian numerically
    OptimizableGraph::Vertex* vi = static_cast<OptimizableGraph::Vertex*>(_vertices[i]);

    if (vi->fixed())
      continue;

    const int vi_dim = vi->dimension();
#ifdef _MSC_VER
    double* add_vi = new double[vi_dim];
#else
    double add_vi[vi_dim];
#endif
    std::fill(add_vi, add_vi + vi_dim, 0.0);
    if (_jacobianOplus[i].rows() != _dimension || _jacobianOplus[i].cols() != vi_dim)
      _jacobianOplus[i].resize(_dimension, vi_dim);
    // add small step along the unit vector in each dimension
    for (int d = 0; d < vi_dim; ++d) {
      vi->push();
      add_vi[d] = delta;
      vi->oplus(add_vi);
      computeError();
      errorBak = _error;
      vi->pop();
      vi->push();
      add_vi[d] = -delta;
      vi->oplus(add_vi);
      computeError();
      errorBak -= _error;
      vi->pop();
      add_vi[d] = 0.0;

      _jacobianOplus[i].col(d) = scalar * errorBak;
    } // end dimension
#ifdef _MSC_VER
    delete[] add_vi;
#endif
  }
  _error = errorBeforeNumeric;

#ifdef G2O_OPENMP
  for (int i = (int)(_vertices.size()) - 1; i >= 0; --i) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(_vertices[i]);
    v->unlockQuadraticForm();
  }
#endif

}
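The loop above is a plain central-difference approximation of the Jacobian: for each non-fixed vertex x_i and each local dimension d,

  J_i.col(d) ≈ ( e(x_i oplus delta*u_d) - e(x_i oplus -delta*u_d) ) / (2*delta)

with delta = 1e-9 and u_d the d-th unit vector. push()/pop() restore the estimate between the two probes, and the unperturbed error is restored at the end so subsequent calls see a consistent state.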
Example 30
 void SparseOptimizer::setToOrigin(){
   for (VertexIDMap::iterator it=vertices().begin(); it!=vertices().end(); ++it) {
     OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(it->second);
     v->setToOrigin();
   }
 }