Example #1
int
main (int argc, const char* argv[])
{
  if (argc <= 1) {
    std::cerr << "Error: no probabilistic graphical model was given." ;
    std::cerr << std::endl << usage << std::endl;
    exit (EXIT_FAILURE);
  }
  int idx = readHorusFlags (argc, argv);
  Horus::FactorGraph fg;
  readFactorGraph (fg, argv[idx]);
  Horus::VarIds queryIds
      = readQueryAndEvidence (fg, argc, argv, idx + 1);
  if (Horus::FactorGraph::exportToLibDai()) {
    fg.exportToLibDai ("model.fg");
  }
  if (Horus::FactorGraph::exportToUai()) {
    fg.exportToUai ("model.uai");
  }
  if (Horus::FactorGraph::exportGraphViz()) {
    fg.exportToGraphViz ("model.dot");
  }
  if (Horus::FactorGraph::printFactorGraph()) {
    fg.print();
  }
  if (Horus::Globals::verbosity > 0) {
    std::cout << "factor graph contains " ;
    std::cout << fg.nrVarNodes() << " variables and " ;
    std::cout << fg.nrFacNodes() << " factors " << std::endl;
  }
  runSolver (fg, queryIds);
  return 0;
}
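Example #1 is the command-line entry point: parse the Horus flags, load a factor graph from the file named on the command line, collect query variables and evidence, optionally export the model in other formats, and hand everything to the solver. The load-and-export part could also be driven programmatically, roughly as in the sketch below; note that readFromUaiFormat is an assumption about a reader pairing with the exportToUai call above, not something this listing confirms, so verify it against the actual Horus::FactorGraph interface.

// Hypothetical sketch: load a model and dump it for inspection.
// readFromUaiFormat is an assumed counterpart of exportToUai.
Horus::FactorGraph fg;
fg.readFromUaiFormat ("model.uai");
fg.exportToGraphViz ("model.dot");  // render with: dot -Tpng model.dot
std::cout << fg.nrVarNodes() << " vars, " << fg.nrFacNodes() << " factors\n";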
Example #2
Params
BeliefProp::getFactorJoint (
    FacNode* fn,
    const VarIds& jointVarIds)
{
  if (!runned_) {
    runSolver();
  }
  Factor res (fn->factor());
  const BpLinks& links = ninf(fn)->getLinks();
  for (size_t i = 0; i < links.size(); i++) {
    Factor msg ({links[i]->varNode()->varId()},
                {links[i]->varNode()->range()},
                getVarToFactorMsg (links[i]));
    res.multiply (msg);
  }
  res.sumOutAllExcept (jointVarIds);
  res.reorderArguments (jointVarIds);
  res.normalize();
  Params jointDist = res.params();
  if (Globals::logDomain) {
    Util::exp (jointDist);
  }
  return jointDist; 
}
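getFactorJoint asks for the joint distribution of a subset of the variables attached to a single factor. It rebuilds the factor belief of standard sum-product belief propagation, the original factor multiplied by every incoming variable-to-factor message,

    b_f(x_f) \propto f(x_f) \prod_{v \in N(f)} \mu_{v \to f}(x_v),

then sums out everything except the query variables, reorders the arguments to match the requested order, and normalizes. When Globals::logDomain is set, all of this happens in log space, so Util::exp converts the result back to a linear-domain distribution at the end.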
Example #3
Params
BeliefProp::getPosterioriOf (VarId vid)
{
  if (!runned_) {
    runSolver();
  }
  VarNode* var = fg.getVarNode (vid);
  assert (var);
  Params probs;
  if (var->hasEvidence()) {
    probs.resize (var->range(), LogAware::noEvidence());
    probs[var->getEvidence()] = LogAware::withEvidence();
  } else {
    probs.resize (var->range(), LogAware::multIdenty());
    const BpLinks& links = ninf(var)->getLinks();
    if (Globals::logDomain) {
      for (size_t i = 0; i < links.size(); i++) {
        probs += links[i]->message();
      }
      LogAware::normalize (probs);
      Util::exp (probs);
    } else {
      for (size_t i = 0; i < links.size(); i++) {
        probs *= links[i]->message();
      }
      LogAware::normalize (probs);
    }
  }
  return probs;
}
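getPosterioriOf returns the marginal (posterior) of a single variable. For an evidence variable the answer is an indicator distribution; otherwise it is the standard variable belief, the normalized product of all incoming factor-to-variable messages:

    b(x_v) \propto \prod_{f \in N(v)} \mu_{f \to v}(x_v).

In the log domain a product of messages becomes a sum, which is why the first branch accumulates with += and exponentiates only after normalizing, while the linear-domain branch accumulates with *=.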
Example #4
Params
BeliefProp::getJointDistributionOf (const VarIds& jointVarIds)
{
  if (!runned_) {
    runSolver();
  }
  VarNode* vn = fg.getVarNode (jointVarIds[0]);
  const FacNodes& facNodes = vn->neighbors();
  // search the neighbor factors of the first query variable for one
  // whose scope contains every query variable
  size_t idx = facNodes.size();
  for (size_t i = 0; i < facNodes.size(); i++) {
    if (facNodes[i]->factor().contains (jointVarIds)) {
      idx = i;
      break;
    }
  }
  if (idx == facNodes.size()) {
    return getJointByConditioning (jointVarIds);
  }
  return getFactorJoint (facNodes[idx], jointVarIds);
}
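The method first looks for one factor whose scope covers every query variable; if it finds one, the joint can be read directly off that factor's belief via getFactorJoint. Otherwise it falls back to getJointByConditioning. The listing does not show that method's body, but the name suggests the usual chain-rule construction, answering one marginal at a time while clamping previously answered variables as evidence:

    P(x_1, \ldots, x_k) = P(x_1) \prod_{i=2}^{k} P(x_i \mid x_1, \ldots, x_{i-1}).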
Example #5
/*
 * Use gradient-projection to solve an instance of
 * the Variable Placement with Separation Constraints problem.
 */
unsigned GradientProjection::solve(
        valarray<double> const &linearCoefficients, 
        valarray<double> &x) {
    COLA_ASSERT(linearCoefficients.size()==x.size());
    COLA_ASSERT(x.size()==denseSize);
    COLA_ASSERT(numStaticVars>=denseSize);
    COLA_ASSERT(sparseQ==nullptr || vars.size()==sparseQ->rowSize());

    if(max_iterations==0) return 0;

    bool converged=false;

    solver = setupVPSC();
#ifdef MOSEK_AVAILABLE
    if(solveWithMosek==Outer) {
        float* ba=new float[vars.size()];
        float* xa=new float[vars.size()];
        for(unsigned i=0;i<vars.size();i++) {
            ba[i]=-linearCoefficients[i];
        }
        mosek_quad_solve_sep(menv,ba,xa);
        for(unsigned i=0;i<vars.size();i++) {
            //printf("mosek result x[%d]=%f\n",i,xa[i]);
            x[i]=xa[i];
        }
        delete [] ba;
        delete [] xa;
        return 1;
    }
#endif
    // it may be that we have to consider dummy vars, which the caller didn't know
    // about.  Thus vars.size() may not equal x.size()
    unsigned n = vars.size();
    valarray<double> b(n);
    result.resize(n);
    
    // Load desired positions into vars; note that we keep desired positions
    // already calculated for dummy vars.
    for (unsigned i=0;i<x.size();i++) {
        COLA_ASSERT(!isNaN(x[i]));
        COLA_ASSERT(isFinite(x[i]));
        b[i]=i<linearCoefficients.size()?linearCoefficients[i]:0;
        result[i]=x[i];
        if(scaling) {
            b[i]*=vars[i]->scale;
            result[i]/=vars[i]->scale;
        } 
        if(!vars[i]->fixedDesiredPosition) vars[i]->desiredPosition=result[i];
    }
    runSolver(result);
        
    valarray<double> g(n); /* gradient */
    valarray<double> previous(n); /* stored positions */
    valarray<double> d(n); /* actual descent vector */

#ifdef CHECK_CONVERGENCE_BY_COST
    double previousCost = DBL_MAX;
#endif
    unsigned counter=0;
    double stepSize;
    for (; counter<max_iterations&&!converged; counter++) {
        previous=result;
        stepSize=0;
        double alpha=computeSteepestDescentVector(b,result,g);

        //printf("Iteration[%d]\n",counter);
        // move to new unconstrained position
        for (unsigned i=0; i<n; i++) {
            // dividing by variable weight is a cheap trick to make these
            // weights mean something in terms of the descent vector
            double step=alpha*g[i]/vars[i]->weight;
            result[i]+=step;
            //printf("   after unconstrained step: x[%d]=%f\n",i,result[i]);
            stepSize+=step*step;
            COLA_ASSERT(!isNaN(result[i]));
            COLA_ASSERT(isFinite(result[i]));
            if(!vars[i]->fixedDesiredPosition) vars[i]->desiredPosition=result[i];
        }

        //project to constraint boundary
        bool constrainedOptimum = runSolver(result);
        stepSize=0;
        for (unsigned i=0;i<n;i++) {
            double step = previous[i]-result[i];
            stepSize+=step*step;
        }
        //constrainedOptimum=false;
        // beta seems, more often than not, to be >1!
        if(constrainedOptimum) {
            // The following step limits the step-size in the feasible
            // direction
            d = result - previous;
            const double beta = 0.5*computeStepSize(g, d);
            // beta > 1.0 takes us back outside the feasible region;
            // beta < 0 is clearly not useful and may happen due to
            // numerical imprecision.
            //printf("beta=%f\n",beta);
            if(beta>0&&beta<0.99999) {
                stepSize=0;
                for (unsigned i=0; i<n; i++) {
                    double step=beta*d[i];
                    result[i]=previous[i]+step;
                    stepSize+=step*step;
                }
            }
        }
#ifdef CHECK_CONVERGENCE_BY_COST
        /* This would be the slow way to detect convergence */
        //if(counter%2) {
            double cost = computeCost(b,result);
            printf("     gp[%d] %.15f %.15f\n",counter,previousCost,cost);
            //COLA_ASSERT(previousCost>cost);
            if(fabs(previousCost - cost) < tolerance) {
                converged = true;
            }
            previousCost = cost;
        //}
#else
        if(stepSize<tolerance) converged = true; 
#endif
    }
    //printf("GP[%d] converged after %d iterations.\n",k,counter);
    for(unsigned i=0;i<x.size();i++) {
        x[i]=result[i];
        if(scaling) {
            x[i]*=vars[i]->scale;
        }
    }
    destroyVPSC(solver);
    return counter;
}
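Each pass through the main loop above is one projected-gradient step. computeSteepestDescentVector supplies the gradient g and a step length alpha; the unconstrained move divides each component by the variable's weight, the VPSC solver projects the result back onto the separation-constraint polytope, and, when the projection was active, the move is damped along the feasible direction d = x_p - x:

    x' = x + \alpha \, \mathrm{diag}(w)^{-1} g, \qquad
    x_p = \Pi_C(x'), \qquad
    x \leftarrow x + \beta (x_p - x), \quad \beta = \tfrac{1}{2}\,\mathtt{computeStepSize}(g, d), \ \beta \in (0, 1),

where \Pi_C denotes the VPSC projection. Convergence is declared when the squared step length drops below tolerance (or, under CHECK_CONVERGENCE_BY_COST, when the change in cost does), and the function returns the number of iterations used.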