Example #1
SEXP dsCMatrix_chol(SEXP x, SEXP pivot)
{
    cholmod_factor
	*N = as_cholmod_factor(dsCMatrix_Cholesky(x, pivot,
						  ScalarLogical(FALSE),
						  ScalarLogical(FALSE)));
    /* Must use a copy; cholmod_factor_to_sparse modifies first arg. */
    cholmod_factor *Ncp = cholmod_copy_factor(N, &c);
    cholmod_sparse *L, *R;
    SEXP ans;
    
    L = cholmod_factor_to_sparse(Ncp, &c); cholmod_free_factor(&Ncp, &c);
    R = cholmod_transpose(L, /*values*/ 1, &c); cholmod_free_sparse(&L, &c);
    ans = PROTECT(chm_sparse_to_SEXP(R, /*cholmod_free*/ 1,
				     /*uploT*/ 1, /*diag*/ "N",
				     GET_SLOT(x, Matrix_DimNamesSym)));
    if (asLogical(pivot)) {
	SEXP piv = PROTECT(allocVector(INTSXP, N->n));
	int *dest = INTEGER(piv), *src = (int*)N->Perm, i;
	
	for (i = 0; i < N->n; i++) dest[i] = src[i] + 1;
	setAttrib(ans, install("pivot"), piv);
	/* FIXME: Because of the cholmod_factor -> S4 obj ->
	 * cholmod_factor conversions, the value of N->minor will
	 * always be N->n.  Change as_cholmod_factor and
	 * chm_factor_as_SEXP to keep track of Minor.
	 */
	setAttrib(ans, install("rank"), ScalarInteger((size_t) N->minor));
	UNPROTECT(1);
    }
    Free(N);
    UNPROTECT(1);
    return ans;
}
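
The comment at the top of the function marks an important CHOLMOD invariant: cholmod_factor_to_sparse() consumes its first argument, so a caller that still needs the factor must convert a copy. A minimal standalone sketch of that idiom (the helper name factor_as_sparse and the assumption of an already initialized cholmod_common are mine):

#include <cholmod.h>

/* Hypothetical helper: convert a factor to sparse form without
 * destroying it.  Assumes c points to an initialized cholmod_common. */
cholmod_sparse *factor_as_sparse(cholmod_factor *L, cholmod_common *c)
{
    cholmod_factor *Lcp = cholmod_copy_factor(L, c);       /* work on a copy; L stays intact */
    cholmod_sparse *A = cholmod_factor_to_sparse(Lcp, c);  /* consumes the copy's contents */
    cholmod_free_factor(&Lcp, c);
    return A;  /* caller releases with cholmod_free_sparse() */
}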
Example #2
SEXP CHMfactor_to_sparse(SEXP x)
{
    CHM_FR L = AS_CHM_FR(x), Lcp;
    CHM_SP Lm;
    R_CheckStack();

    /* cholmod_factor_to_sparse changes its first argument. Make a copy */
    Lcp = cholmod_copy_factor(L, &c);
    if (!(Lcp->is_ll))
	if (!cholmod_change_factor(Lcp->xtype, 1, 0, 1, 1, Lcp, &c))
	    error(_("cholmod_change_factor failed with status %d"), c.status);
    Lm = cholmod_factor_to_sparse(Lcp, &c); cholmod_free_factor(&Lcp, &c);
    return chm_sparse_to_SEXP(Lm, 1/*do_free*/, -1/*uploT*/, 0/*Rkind*/,
			      "N"/*non_unit*/, R_NilValue/*dimNames*/);
}
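
The is_ll guard above matters because cholmod_factor_to_sparse() emits whatever representation the factor currently holds: for an LDL' factor the stored diagonal is D, not the Cholesky triangle. A minimal sketch of forcing simplicial, packed LL' form first (the helper name ensure_ll is mine; error handling elided):

#include <cholmod.h>

/* Hypothetical helper: coerce a factor to simplicial, packed,
 * monotonic LL' form so a later factor_to_sparse() yields L itself.
 * Returns CHOLMOD's status (nonzero on success). */
int ensure_ll(cholmod_factor *L, cholmod_common *c)
{
    if (L->is_ll)
        return 1;  /* already LL'; nothing to do */
    /* args: to_xtype, to_ll, to_super, to_packed, to_monotonic */
    return cholmod_change_factor(L->xtype, 1, 0, 1, 1, L, c);
}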
Example #3

  bool SparseOptimizerIncremental::updateInitialization(HyperGraph::VertexSet& vset, HyperGraph::EdgeSet& eset)
  {
    if (batchStep) {
      return SparseOptimizerOnline::updateInitialization(vset, eset);
    }

    for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
      v->clearQuadraticForm(); // be sure that b is zero for this vertex
    }

    // get the touched vertices
    _touchedVertices.clear();
    for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      OptimizableGraph::Vertex* v1 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[0]);
      OptimizableGraph::Vertex* v2 = static_cast<OptimizableGraph::Vertex*>(e->vertices()[1]);
      if (! v1->fixed())
        _touchedVertices.insert(v1);
      if (! v2->fixed())
        _touchedVertices.insert(v2);
    }
    //cerr << PVAR(_touchedVertices.size()) << endl;

    // updating the internal structures
    std::vector<HyperGraph::Vertex*> newVertices;
    newVertices.reserve(vset.size());
    _activeVertices.reserve(_activeVertices.size() + vset.size());
    _activeEdges.reserve(_activeEdges.size() + eset.size());
    for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it)
      _activeEdges.push_back(static_cast<OptimizableGraph::Edge*>(*it));
    //cerr << "updating internal done." << endl;

    // update the index mapping
    size_t next = _ivMap.size();
    for (HyperGraph::VertexSet::iterator it = vset.begin(); it != vset.end(); ++it) {
      OptimizableGraph::Vertex* v=static_cast<OptimizableGraph::Vertex*>(*it);
      if (! v->fixed()){
        if (! v->marginalized()){
          v->setHessianIndex(next);
          _ivMap.push_back(v);
          newVertices.push_back(v);
          _activeVertices.push_back(v);
          next++;
        } 
        else // not supported right now
          abort();
      }
      else {
        v->setHessianIndex(-1);
      }
    }
    //cerr << "updating index mapping done." << endl;

    // backup the tempindex and prepare sorting structure
    std::vector<VertexBackup> backupIdx(_touchedVertices.size());
    memset(backupIdx.data(), 0, sizeof(VertexBackup) * _touchedVertices.size());
    int idx = 0;
    for (HyperGraph::VertexSet::iterator it = _touchedVertices.begin(); it != _touchedVertices.end(); ++it) {
      OptimizableGraph::Vertex* v = static_cast<OptimizableGraph::Vertex*>(*it);
      backupIdx[idx].hessianIndex = v->hessianIndex();
      backupIdx[idx].vertex = v;
      backupIdx[idx].hessianData = v->hessianData();
      ++idx;
    }
    sort(backupIdx.begin(), backupIdx.end()); // sort by hessianIndex, which matches the ordering used later by the optimizer
    for (int i = 0; i < idx; ++i) {
      backupIdx[i].vertex->setHessianIndex(i);
    }
    //cerr << "backup tempindex done." << endl;

    // building the structure of the update
    _updateMat.clear(true); // get rid of the old matrix structure
    _updateMat.rowBlockIndices().clear();
    _updateMat.colBlockIndices().clear();
    _updateMat.blockCols().clear();

    // placing the current stuff in _updateMat
    MatrixXd* lastBlock = 0;
    int sizePoses = 0;
    for (int i = 0; i < idx; ++i) {
      OptimizableGraph::Vertex* v = backupIdx[i].vertex;
      int dim = v->dimension();
      sizePoses+=dim;
      _updateMat.rowBlockIndices().push_back(sizePoses);
      _updateMat.colBlockIndices().push_back(sizePoses);
      _updateMat.blockCols().push_back(SparseBlockMatrix<MatrixXd>::IntBlockMap());
      int ind = v->hessianIndex();
      //cerr << PVAR(ind) << endl;
      if (ind >= 0) {
        MatrixXd* m = _updateMat.block(ind, ind, true);
        v->mapHessianMemory(m->data());
        lastBlock = m;
      }
    }
    if (lastBlock) // guard: the update may touch no non-fixed vertex
      lastBlock->diagonal().array() += 1e-6; // HACK to keep the eigenvalues positive


    for (HyperGraph::EdgeSet::const_iterator it = eset.begin(); it != eset.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      OptimizableGraph::Vertex* v1 = (OptimizableGraph::Vertex*) e->vertices()[0];
      OptimizableGraph::Vertex* v2 = (OptimizableGraph::Vertex*) e->vertices()[1];

      int ind1 = v1->hessianIndex();
      if (ind1 == -1)
        continue;
      int ind2 = v2->hessianIndex();
      if (ind2 == -1)
        continue;
      bool transposedBlock = ind1 > ind2;
      if (transposedBlock) // make sure we allocate the upper-triangular block
        swap(ind1, ind2);

      MatrixXd* m = _updateMat.block(ind1, ind2, true);
      e->mapHessianMemory(m->data(), 0, 1, transposedBlock);
    }

    // build the system into _updateMat
    for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
      OptimizableGraph::Edge * e = static_cast<OptimizableGraph::Edge*>(*it);
      e->computeError();
    }
    for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      e->linearizeOplus();
    }
    for (HyperGraph::EdgeSet::iterator it = eset.begin(); it != eset.end(); ++it) {
      OptimizableGraph::Edge* e = static_cast<OptimizableGraph::Edge*>(*it);
      e->constructQuadraticForm();
    }

    // restore the original data for the vertex
    for (int i = 0; i < idx; ++i) {
      backupIdx[i].vertex->setHessianIndex(backupIdx[i].hessianIndex);
      if (backupIdx[i].hessianData)
        backupIdx[i].vertex->mapHessianMemory(backupIdx[i].hessianData);
    }

    // update the structure of the real block matrix
    bool solverStatus = _algorithm->updateStructure(newVertices, eset);

    bool updateStatus = computeCholeskyUpdate();
    if (! updateStatus) {
      cerr << "Error while computing update" << endl;
    }

    cholmod_sparse* updateAsSparseFactor = cholmod_factor_to_sparse(_cholmodFactor, &_cholmodCommon);

    // convert the CCS update to triplets, permuting its entries back into the
    // ordering of L: index / slamDimension selects the vertex in backupIdx,
    // index % slamDimension the offset inside that vertex's block
    if (updateAsSparseFactor->nzmax > _permutedUpdate->nzmax) {
      //cerr << "realloc _permutedUpdate" << endl;
      cholmod_reallocate_triplet(updateAsSparseFactor->nzmax, _permutedUpdate, &_cholmodCommon);
    }
    _permutedUpdate->nnz = 0;
    _permutedUpdate->nrow = _permutedUpdate->ncol = _L->n;
    {
      int* Ap = (int*)updateAsSparseFactor->p;
      int* Ai = (int*)updateAsSparseFactor->i;
      double* Ax = (double*)updateAsSparseFactor->x;
      int* Bj = (int*)_permutedUpdate->j;
      int* Bi = (int*)_permutedUpdate->i;
      double* Bx = (double*)_permutedUpdate->x;
      for (size_t c = 0; c < updateAsSparseFactor->ncol; ++c) {
        const int& rbeg = Ap[c];
        const int& rend = Ap[c+1];
        int cc = c / slamDimension;
        int coff = c % slamDimension;
        const int& cbase = backupIdx[cc].vertex->colInHessian();
        const int& ccol = _perm(cbase + coff);
        for (int j = rbeg; j < rend; j++) {
          const int& r = Ai[j];
          const double& val = Ax[j];

          int rr = r / slamDimension;
          int roff = r % slamDimension;
          const int& rbase = backupIdx[rr].vertex->colInHessian();
          
          int row = _perm(rbase + roff);
          int col = ccol;
          if (col > row) // lower triangular entry
            swap(col, row);
          Bi[_permutedUpdate->nnz] = row;
          Bj[_permutedUpdate->nnz] = col;
          Bx[_permutedUpdate->nnz] = val;
          ++_permutedUpdate->nnz;
        }
      }
    }
    cholmod_free_sparse(&updateAsSparseFactor, &_cholmodCommon);

#if 0
    cholmod_sparse* updatePermuted = cholmod_triplet_to_sparse(_permutedUpdate, _permutedUpdate->nnz, &_cholmodCommon);
    //writeCCSMatrix("update-permuted.txt", updatePermuted->nrow, updatePermuted->ncol, (int*)updatePermuted->p, (int*)updatePermuted->i, (double*)updatePermuted->x, false);
    _solverInterface->choleskyUpdate(updatePermuted);
    cholmod_free_sparse(&updatePermuted, &_cholmodCommon);
#else
    convertTripletUpdateToSparse();
    _solverInterface->choleskyUpdate(_permutedUpdateAsSparse);
#endif

    return solverStatus;
  }
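
The triplet buffer _permutedUpdate filled above feeds CHOLMOD's standard triplet-to-CCS conversion (the disabled #if 0 branch calls cholmod_triplet_to_sparse directly). A self-contained sketch of that conversion with made-up 2x2 data, assuming int indices and real values:

#include <cholmod.h>

/* Hypothetical example: assemble a lower-triangular 2x2 update in
 * triplet form and convert it to compressed-column storage. */
cholmod_sparse *triplet_to_ccs_demo(cholmod_common *c)
{
    /* stype = -1: symmetric, only the lower triangle is stored */
    cholmod_triplet *T = cholmod_allocate_triplet(2, 2, 3, -1, CHOLMOD_REAL, c);
    int *Ti = (int*)T->i, *Tj = (int*)T->j;
    double *Tx = (double*)T->x;
    Ti[0] = 0; Tj[0] = 0; Tx[0] = 4.0; /* (0,0) */
    Ti[1] = 1; Tj[1] = 0; Tx[1] = 2.0; /* (1,0) */
    Ti[2] = 1; Tj[2] = 1; Tx[2] = 3.0; /* (1,1) */
    T->nnz = 3;
    cholmod_sparse *S = cholmod_triplet_to_sparse(T, T->nnz, c);
    cholmod_free_triplet(&T, c);
    return S;  /* caller frees with cholmod_free_sparse() */
}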
Example #4

  void MultivariateFNormalSufficientSparse::set_Sigma(
          const SparseMatrix<double>& Sigma)
  {
        if (Sigma.cols() != Sigma.rows()) {
            IMP_THROW("need a square matrix!", ModelException);
        }
        //std::cout << "set_sigma" << std::endl;
        if (Sigma_) cholmod_free_sparse(&Sigma_, c_);
        cholmod_sparse A(Eigen::viewAsCholmod(
                            Sigma.selfadjointView<Eigen::Upper>()));
        Sigma_=cholmod_copy_sparse(&A, c_);
        //cholmod_print_sparse(Sigma_,"Sigma",c_);
        IMP_LOG(TERSE, "MVNsparse:   set Sigma to new matrix" << std::endl);
        IMP_LOG(TERSE, "MVNsparse:   computing Cholesky decomposition"
                << std::endl);
        // compute Cholesky decomposition for determinant and inverse
        //c_->final_asis=1; // setup LDLT calculation
        //c_->supernodal = CHOLMOD_SIMPLICIAL;
        // convert matrix to cholmod format
        //symbolic and numeric factorization
        if (L_) cholmod_free_factor(&L_, c_); // free any previous factor before reuse
        L_ = cholmod_analyze(Sigma_, c_);
        int success = cholmod_factorize(Sigma_, L_, c_);
        //cholmod_print_factor(L_,"L",c_);

        if (success == 0 || L_->minor < L_->n)
            IMP_THROW("Sigma matrix is not positive definite!",
                    ModelException);
        // determinant and derived constants
        cholmod_factor *Lcp(cholmod_copy_factor(L_, c_));
        cholmod_sparse *Lsp(cholmod_factor_to_sparse(Lcp,c_));
        double logDetSigma=0;
        if ((Lsp->itype != CHOLMOD_INT) ||
                (Lsp->xtype != CHOLMOD_REAL))
            IMP_THROW("indices must be int and values real; update them here first",
                    ModelException);
        int *p=(int*) Lsp->p;
        double *x=(double*) Lsp->x;
        for (size_t i=0; i < (size_t) M_; ++i)
            logDetSigma += std::log(x[p[i]]);
        cholmod_free_sparse(&Lsp,c_);
        cholmod_free_factor(&Lcp,c_);
        IMP_LOG(TERSE, "MVNsparse:   log det(Sigma) = "
                << logDetSigma << std::endl);
        IMP_LOG(TERSE, "MVNsparse:   det(Sigma) = "
                << exp(logDetSigma) << std::endl);
        norm_= std::pow(2*IMP::PI, -double(N_*M_)/2.0)
                    * exp(-double(N_)/2.0*logDetSigma);
        lnorm_=double(N_*M_)/2 * log(2*IMP::PI) + double(N_)/2 * logDetSigma;
        IMP_LOG(TERSE, "MVNsparse:   norm = " << norm_ << " lnorm = "
                << lnorm_ << std::endl);
        //inverse
        IMP_LOG(TERSE, "MVNsparse:   solving for inverse" << std::endl);
        cholmod_sparse* id = cholmod_speye(M_,M_,CHOLMOD_REAL,c_);
        if (P_) cholmod_free_sparse(&P_, c_);
        P_ = cholmod_spsolve(CHOLMOD_A, L_, id, c_);
        cholmod_free_sparse(&id, c_);
        if (!P_) IMP_THROW("Unable to solve for inverse!", ModelException);
        //WP
        IMP_LOG(TERSE, "MVNsparse:   solving for PW" << std::endl);
        if (PW_) cholmod_free_sparse(&PW_, c_);
        PW_ = cholmod_spsolve(CHOLMOD_A, L_, W_, c_);
        if (!PW_) IMP_THROW("Unable to solve for PW!", ModelException);
        IMP_LOG(TERSE, "MVNsparse:   done" << std::endl);
  }
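
The log-determinant loop relies on cholmod_factor_to_sparse() placing the diagonal entry first in each column, so x[p[i]] reads the i-th diagonal element. The same computation pulled out as a standalone sketch (the helper name logdet_from_factor is mine; it assumes a simplicial LDL' factor with int indices and real values, so that log|Sigma| = sum_i log D_ii):

#include <math.h>
#include <cholmod.h>

/* Hypothetical helper mirroring the loop above. */
double logdet_from_factor(cholmod_factor *L, cholmod_common *c)
{
    cholmod_factor *Lcp = cholmod_copy_factor(L, c);  /* conversion destroys its input */
    cholmod_sparse *Lsp = cholmod_factor_to_sparse(Lcp, c);
    int *p = (int*)Lsp->p;
    double *x = (double*)Lsp->x;
    double logdet = 0.0;
    for (size_t i = 0; i < Lsp->ncol; ++i)
        logdet += log(x[p[i]]);  /* diagonal entry heads each column */
    cholmod_free_sparse(&Lsp, c);
    cholmod_free_factor(&Lcp, c);
    return logdet;
}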