bool SparseOptimizer::buildIndexMapping(SparseOptimizer::VertexContainer& vlist)
{
  if (! vlist.size()) {
    _ivMap.clear();
    return false;
  }

  _ivMap.resize(vlist.size());
  size_t i = 0;
  // Walk over all vertices and assign each one an index.
  // If a vertex is fixed, its index is set to -1.
  // Non-fixed vertices receive an incremental index:
  // first the non-marginalized vertices, then the marginalized ones.
  // In the end _ivMap holds all non-fixed vertices, with the
  // non-marginalized ones in the first positions of _ivMap.
  for (int k=0; k<2; k++)
    for (VertexContainer::iterator it=vlist.begin(); it!=vlist.end(); it++) {
      OptimizableGraph::Vertex* v = *it;
      if (! v->fixed()) {
        if (static_cast<int>(v->marginalized()) == k) {
          v->setTempIndex(i);
          _ivMap[i]=v;
          i++;
        }
      } else {
        v->setTempIndex(-1);
      }
    }
  _ivMap.resize(i);
  return true;
}
bool SparseOptimizer::buildIndexMapping(SparseOptimizer::VertexContainer& vlist)
{
  if (! vlist.size()) {
    _ivMap.clear();
    return false;
  }

  _ivMap.resize(vlist.size());
  size_t i = 0;
  for (int k=0; k<2; k++)
    for (VertexContainer::iterator it=vlist.begin(); it!=vlist.end(); ++it) {
      OptimizableGraph::Vertex* v = *it;
      if (! v->fixed()) {
        if (static_cast<int>(v->marginalized()) == k) {
          v->setHessianIndex(i);
          _ivMap[i]=v;
          i++;
        }
      } else {
        v->setHessianIndex(-1);
      }
    }
  _ivMap.resize(i);
  return true;
}
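The two listings above differ only in the name of the index accessor (setTempIndex vs. setHessianIndex) and minor iterator style; the index-assignment logic is identical. As a minimal, self-contained sketch of that two-pass ordering, the following toy program (it uses an illustrative ToyVertex struct, not g2o types) assigns indices the same way: free non-marginalized vertices first, free marginalized vertices next, fixed vertices get -1.

#include <iostream>
#include <vector>

// Toy stand-in for OptimizableGraph::Vertex with only the flags the
// index mapping cares about. Names here are illustrative, not g2o API.
struct ToyVertex {
  int id;
  bool fixed;
  bool marginalized;
  int index;   // filled in by the two-pass loop below
};

int main() {
  std::vector<ToyVertex> vlist = {
    {0, false, false}, {1, true, false}, {2, false, true},
    {3, false, false}, {4, false, true}
  };

  // Same two-pass scheme as buildIndexMapping: pass k==0 indexes the
  // non-marginalized free vertices, pass k==1 the marginalized ones;
  // fixed vertices are assigned -1.
  int i = 0;
  for (int k = 0; k < 2; ++k)
    for (auto& v : vlist) {
      if (!v.fixed) {
        if (static_cast<int>(v.marginalized) == k)
          v.index = i++;
      } else {
        v.index = -1;
      }
    }

  for (const auto& v : vlist)
    std::cout << "vertex " << v.id << " -> index " << v.index << "\n";
  // Prints: 0 -> 0, 1 -> -1, 2 -> 2, 3 -> 1, 4 -> 3
  // i.e. the non-marginalized (pose-like) vertices occupy the first
  // slots of the mapping, the marginalized (landmark-like) ones follow.
  return 0;
}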
bool BlockSolver<Traits>::updateStructure(const std::vector<HyperGraph::Vertex*>& vset, const HyperGraph::EdgeSet& edges)
{
  for (std::vector<HyperGraph::Vertex*>::const_iterator vit = vset.begin(); vit != vset.end(); ++vit) {
    OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*vit);
    int dim = v->dimension();
    if (! v->marginalized()){
      v->setColInHessian(_sizePoses);
      _sizePoses+=dim;
      _Hpp->rowBlockIndices().push_back(_sizePoses);
      _Hpp->colBlockIndices().push_back(_sizePoses);
      _Hpp->blockCols().push_back(typename SparseBlockMatrix<PoseMatrixType>::IntBlockMap());
      ++_numPoses;
      int ind = v->hessianIndex();
      PoseMatrixType* m = _Hpp->block(ind, ind, true);
      v->mapHessianMemory(m->data());
    } else {
      std::cerr << "updateStructure(): Schur not supported" << std::endl;
      abort();
    }
  }
  resizeVector(_sizePoses + _sizeLandmarks);

  for (HyperGraph::EdgeSet::const_iterator it = edges.begin(); it != edges.end(); ++it) {
    OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);

    for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
      OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertex(viIdx);
      int ind1 = v1->hessianIndex();
      int indexV1Bak = ind1;
      if (ind1 == -1)
        continue;
      for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
        OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertex(vjIdx);
        int ind2 = v2->hessianIndex();
        if (ind2 == -1)
          continue;
        ind1 = indexV1Bak;
        bool transposedBlock = ind1 > ind2;
        if (transposedBlock) // make sure we allocate the upper triangular block
          std::swap(ind1, ind2);

        if (! v1->marginalized() && !v2->marginalized()) {
          PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
          e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
        } else {
          std::cerr << __PRETTY_FUNCTION__ << ": not supported" << std::endl;
        }
      }
    }
  }

  return true;
}
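Because the Hessian is symmetric, only its upper-triangular blocks are stored: when an edge connects two vertices whose hessianIndex ordering runs the "wrong" way (ind1 > ind2), the indices are swapped before allocating the block, and the transposedBlock flag passed to mapHessianMemory tells the edge to write the transposed contribution into that memory. A minimal sketch of this convention follows; the indices are made up and a std::map stands in for the sparse block matrix (no g2o types are used).

#include <algorithm>
#include <cstdio>
#include <map>
#include <utility>

int main() {
  // The map is a stand-in for the block structure of _Hpp.
  std::map<std::pair<int,int>, const char*> blocks;

  // Suppose an edge connects vertex A with hessianIndex 5 and vertex B
  // with hessianIndex 2 (illustrative values).
  int ind1 = 5, ind2 = 2;
  bool transposedBlock = ind1 > ind2;   // true: A comes after B in the ordering
  if (transposedBlock)
    std::swap(ind1, ind2);              // allocate block (2,5), never (5,2)
  blocks[{ind1, ind2}] = "H_AB^T";      // the edge must write the transposed block

  std::printf("allocated block (%d,%d), transposed=%d\n",
              ind1, ind2, static_cast<int>(transposedBlock));
  // Only the upper triangle of the symmetric Hessian is stored; the flag
  // handed to mapHessianMemory(..., transposedBlock) makes the edge fill
  // the mapped memory with H_AB^T instead of H_AB.
  return 0;
}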
bool BlockSolver<Traits>::buildStructure(bool zeroBlocks)
{
  assert(_optimizer);

  size_t sparseDim = 0;
  _numPoses=0;
  _numLandmarks=0;
  _sizePoses=0;
  _sizeLandmarks=0;
  int* blockPoseIndices = new int[_optimizer->indexMapping().size()];
  int* blockLandmarkIndices = new int[_optimizer->indexMapping().size()];

  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    int dim = v->dimension();
    if (! v->marginalized()){
      v->setColInHessian(_sizePoses);
      _sizePoses+=dim;
      blockPoseIndices[_numPoses]=_sizePoses;
      ++_numPoses;
    } else {
      v->setColInHessian(_sizeLandmarks);
      _sizeLandmarks+=dim;
      blockLandmarkIndices[_numLandmarks]=_sizeLandmarks;
      ++_numLandmarks;
    }
    sparseDim += dim;
  }
  resize(blockPoseIndices, _numPoses, blockLandmarkIndices, _numLandmarks, sparseDim);
  delete[] blockLandmarkIndices;
  delete[] blockPoseIndices;

  // allocate the diagonal blocks of Hpp and Hll
  int poseIdx = 0;
  int landmarkIdx = 0;
  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    if (! v->marginalized()){
      //assert(poseIdx == v->hessianIndex());
      PoseMatrixType* m = _Hpp->block(poseIdx, poseIdx, true);
      if (zeroBlocks)
        m->setZero();
      v->mapHessianMemory(m->data());
      ++poseIdx;
    } else {
      LandmarkMatrixType* m = _Hll->block(landmarkIdx, landmarkIdx, true);
      if (zeroBlocks)
        m->setZero();
      v->mapHessianMemory(m->data());
      ++landmarkIdx;
    }
  }
  assert(poseIdx == _numPoses && landmarkIdx == _numLandmarks);

  // temporary structure for building the pattern of the Schur complement
  SparseBlockMatrixHashMap<PoseMatrixType>* schurMatrixLookup = 0;
  if (_doSchur) {
    schurMatrixLookup = new SparseBlockMatrixHashMap<PoseMatrixType>(_Hschur->rowBlockIndices(), _Hschur->colBlockIndices());
    schurMatrixLookup->blockCols().resize(_Hschur->blockCols().size());
  }

  // here we assume that the landmark indices start after the pose ones;
  // create the structure in Hpp, Hll and in Hpl
  for (SparseOptimizer::EdgeContainer::const_iterator it=_optimizer->activeEdges().begin(); it!=_optimizer->activeEdges().end(); ++it){
    OptimizableGraph::Edge* e = *it;

    for (size_t viIdx = 0; viIdx < e->vertices().size(); ++viIdx) {
      OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertex(viIdx);
      int ind1 = v1->hessianIndex();
      if (ind1 == -1)
        continue;
      int indexV1Bak = ind1;
      for (size_t vjIdx = viIdx + 1; vjIdx < e->vertices().size(); ++vjIdx) {
        OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertex(vjIdx);
        int ind2 = v2->hessianIndex();
        if (ind2 == -1)
          continue;
        ind1 = indexV1Bak;
        bool transposedBlock = ind1 > ind2;
        if (transposedBlock){ // make sure we allocate the upper triangular block
          std::swap(ind1, ind2);
        }
        if (! v1->marginalized() && !v2->marginalized()){
          PoseMatrixType* m = _Hpp->block(ind1, ind2, true);
          if (zeroBlocks)
            m->setZero();
          e->mapHessianMemory(m->data(), viIdx, vjIdx, transposedBlock);
          if (_Hschur) { // assume this is only needed in case we solve with the Schur complement
            schurMatrixLookup->addBlock(ind1, ind2);
          }
        } else if (v1->marginalized() && v2->marginalized()){
          // RAINER hmm.... should we ever reach this here????
          LandmarkMatrixType* m = _Hll->block(ind1-_numPoses, ind2-_numPoses, true);
          if (zeroBlocks)
            m->setZero();
          e->mapHessianMemory(m->data(), viIdx, vjIdx, false);
        } else {
          if (v1->marginalized()){
            PoseLandmarkMatrixType* m = _Hpl->block(v2->hessianIndex(), v1->hessianIndex()-_numPoses, true);
            if (zeroBlocks)
              m->setZero();
            e->mapHessianMemory(m->data(), viIdx, vjIdx, true); // transpose the block before writing to it
          } else {
            PoseLandmarkMatrixType* m = _Hpl->block(v1->hessianIndex(), v2->hessianIndex()-_numPoses, true);
            if (zeroBlocks)
              m->setZero();
            e->mapHessianMemory(m->data(), viIdx, vjIdx, false); // map the block directly
          }
        }
      }
    }
  }

  if (! _doSchur)
    return true;

  _DInvSchur->diagonal().resize(landmarkIdx);
  _Hpl->fillSparseBlockMatrixCCS(*_HplCCS);

  for (size_t i = 0; i < _optimizer->indexMapping().size(); ++i) {
    OptimizableGraph::Vertex* v = _optimizer->indexMapping()[i];
    if (v->marginalized()){
      const HyperGraph::EdgeSet& vedges=v->edges();
      for (HyperGraph::EdgeSet::const_iterator it1=vedges.begin(); it1!=vedges.end(); ++it1){
        for (size_t i=0; i<(*it1)->vertices().size(); ++i) {
          OptimizableGraph::Vertex* v1= (OptimizableGraph::Vertex*) (*it1)->vertex(i);
          if (v1->hessianIndex()==-1 || v1==v)
            continue;
          for (HyperGraph::EdgeSet::const_iterator it2=vedges.begin(); it2!=vedges.end(); ++it2){
            for (size_t j=0; j<(*it2)->vertices().size(); ++j) {
              OptimizableGraph::Vertex* v2= (OptimizableGraph::Vertex*) (*it2)->vertex(j);
              if (v2->hessianIndex()==-1 || v2==v)
                continue;
              int i1=v1->hessianIndex();
              int i2=v2->hessianIndex();
              if (i1<=i2) {
                schurMatrixLookup->addBlock(i1, i2);
              }
            }
          }
        }
      }
    }
  }
  _Hschur->takePatternFromHash(*schurMatrixLookup);
  delete schurMatrixLookup;
  _Hschur->fillSparseBlockMatrixCCSTransposed(*_HschurTransposedCCS);

  return true;
}
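The nested loops at the end collect the sparsity pattern of the Schur complement: for every marginalized vertex v, each pair of non-marginalized vertices sharing an edge with v gets a block in _Hschur, because eliminating v introduces fill-in between all poses that observe it. A minimal, self-contained sketch of that fill-in pattern (toy indices, no g2o types) is shown below.

#include <iostream>
#include <set>
#include <utility>
#include <vector>

int main() {
  // observations[l] = hessian indices of the poses observing landmark l
  // (indices invented for the example).
  std::vector<std::vector<int>> observations = {
    {0, 1},       // landmark 0 seen from poses 0 and 1
    {0, 1, 2}     // landmark 1 seen from poses 0, 1 and 2
  };

  std::set<std::pair<int,int>> schurPattern;   // upper-triangular blocks of Hschur
  for (const auto& poses : observations)
    for (int i1 : poses)
      for (int i2 : poses)
        if (i1 <= i2)                           // keep only the upper triangle, as g2o does
          schurPattern.insert({i1, i2});

  for (const auto& b : schurPattern)
    std::cout << "Hschur block (" << b.first << "," << b.second << ")\n";
  // Prints (0,0) (0,1) (0,2) (1,1) (1,2) (2,2): eliminating landmark 1
  // connects poses 0, 1 and 2 even if no direct pose-pose edge exists.
  return 0;
}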