Code Example #1
File: Solver.C  Project: lokdlok/Numberjack
lbool Solver::solve(const vec<Lit>& assumps)
{
  //start_time = getRunTime();

    model.clear();
    conflict.clear();

    if (!ok) return l_False;

    assumps.copyTo(assumptions);

    double  nof_conflicts = restart_first;
    double  nof_learnts   = nClauses() * learntsize_factor;
    lbool   status        = l_Undef;

    if (verbosity >= 1){
        reportf("============================[ Search Statistics ]==============================\n");
        reportf("| Conflicts |          ORIGINAL         |          LEARNT          | Progress |\n");
        reportf("|           |    Vars  Clauses Literals |    Limit  Clauses Lit/Cl |          |\n");
        reportf("===============================================================================\n");
    }

    // Search:
    bool reached_limit = false;
    while (status == l_Undef && !reached_limit){
        if (verbosity >= 1)
            reportf("| %9d | %7d %8d %8d | %8d %8d %6.0f | %6.3f %% |\n", (int)conflicts, order_heap.size(), nClauses(), (int)clauses_literals, (int)nof_learnts, nLearnts(), (double)learnts_literals/nLearnts(), progress_estimate*100), fflush(stdout);
        status = search((int)nof_conflicts, (int)nof_learnts);

        reached_limit = limitsExpired();

        nof_conflicts *= restart_inc;
        nof_learnts   *= learntsize_inc;
    }

 
    if (verbosity >= 1)
        reportf("===============================================================================\n");


    if (status == l_True){
        // Extend & copy model:
        model.growTo(nVars());
        for (int i = 0; i < nVars(); i++) model[i] = value(i);
#ifndef NDEBUG
        verifyModel();
#endif
    }else if (status == l_False){
        //assert(status == l_False);
        if (conflict.size() == 0)
            ok = false;
    }
    // else: a search limit was reached and status remains l_Undef.

    //cancelUntil(init_level);
    return status; // == l_True;
}
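
This variant of Solver::solve returns an lbool rather than a bool, so the caller has to distinguish l_True, l_False, and l_Undef (the budget checked by limitsExpired() ran out). Below is a minimal, hypothetical caller sketch assuming a MiniSat-1.x-style interface; the exact literal constructor (Lit(v) versus mkLit(v)) and the addClause signature vary between the forks collected on this page.

// Hypothetical usage sketch (not taken from the Numberjack sources).
Solver S;
Var a = S.newVar();
Var b = S.newVar();

vec<Lit> clause;
clause.push(Lit(a));
clause.push(Lit(b));
S.addClause(clause);                  // add the clause (a OR b)

vec<Lit> assumps;
assumps.push(~Lit(a));                // assume "not a" for this call only

lbool res = S.solve(assumps);
if (res == l_True){
    // S.model[v] now holds the assignment of each variable v.
}else if (res == l_False){
    // S.conflict explains the failure in terms of the assumptions.
}else{
    // l_Undef: limitsExpired() stopped the search before a verdict.
}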
Code Example #2
File: Solver.C  Project: TatianaBatura/link-grammar
lbool Solver::solve(const vec<Lit>& assumps)
{
  model.clear();
  conflict.clear();

  if (!ok) {
    return l_False;
  }

  assumps.copyTo(assumptions);

  double  nof_conflicts = restart_first;
  double  nof_learnts   = nClauses() * learntsize_factor;
  lbool   status        = l_Undef;

  if (verbosity >= 1){
    reportf("============================[ Search Statistics ]==============================\n");
    reportf("| Conflicts |          ORIGINAL         |          LEARNT          | Progress |\n");
    reportf("|           |    Vars  Clauses Literals |    Limit  Clauses Lit/Cl |          |\n");
    reportf("===============================================================================\n");
  }

  // Search:
  while (status == l_Undef){
    if (verbosity >= 1)
      reportf("| .%9d. | .%7d. .%8d. .%8d. | .%8d. .%8d. .%6.0f. | .%6.3f. %% |\n", (int)conflicts, order_heap.size(), nClauses(), (int)clauses_literals, (int)nof_learnts, nLearnts(), (double)learnts_literals/nLearnts(), progress_estimate*100), fflush(stdout);
    status = search((int)nof_conflicts, (int)nof_learnts);
    nof_conflicts *= restart_inc;
    nof_learnts   *= learntsize_inc;
  }

  if (verbosity >= 1)
    reportf("===============================================================================\n");


  if (status == l_True){
    // Extend & copy model:
    model.growTo(nVars());
    for (int i = 0; i < nVars(); i++) 
      model[i] = value(i);
    //    printTrail();
#ifdef _DEBUG
    verifyModel();
#endif
  }else{
    if (conflict.size() == 0) {
      ok = false;
    }
  }

  //  cancelUntil(0);
  return status;
}
Code Example #3
lbool SimpSolver::solve(const vec<Lit>& assumps, bool do_simp, bool turn_off_simp) {
    vec<Var> extra_frozen;
    lbool     result = l_True;

    do_simp &= use_simplification;
    do_simp &= (decisionLevel() == 0);

    if (do_simp){

        // Assumptions must be temporarily frozen to run variable elimination:
        for (int i = 0; i < assumps.size(); i++){
            Var v = var(assumps[i]);

            // If an assumption has been eliminated, remember it.
            if (isEliminated(v))
                remember(v);

            if (!frozen[v]){
                // Freeze and store.
                setFrozen(v, true);
                extra_frozen.push(v);
            }
        }

        if (eliminate(turn_off_simp))
            result = l_True;
        else
            result = l_False;
    }

    if (result == l_True)
        result = Solver::solve(assumps);

    if (result == l_True) {
        extendModel();
#ifndef NDEBUG
        verifyModel();
#endif
    }

    if (do_simp)
        // Unfreeze the assumptions that were frozen:
        for (int i = 0; i < extra_frozen.size(); i++)
            setFrozen(extra_frozen[i], false);

    return result;
}
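
SimpSolver::solve freezes the assumption variables because variable elimination would otherwise be free to resolve them out of the formula, after which neither the model nor the conflict clause could be expressed in terms of them. A brief, hypothetical caller sketch against the three-argument overload above:

// Hypothetical usage sketch: solving under assumptions with simplification enabled.
SimpSolver S;
// ... newVar() / addClause() as usual ...
vec<Lit> assumps;
// ... push the assumption literals ...
lbool res = S.solve(assumps, /*do_simp=*/true, /*turn_off_simp=*/false);
// Inside solve(), the assumption variables are frozen before eliminate() runs
// and unfrozen again before returning, exactly as in the body above.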
Code Example #4
File: Solver.C  Project: msoos/glucosetrack
bool Solver::solve(const vec<Lit>& assumps)
{
    model.clear();
    conflict.clear();

    nbDecisionLevelHistory.initSize(100);
    totalSumOfDecisionLevel = 0;

    if (!ok) return false;

    assumps.copyTo(assumptions);

    double nof_conflicts = restart_first;
    nof_learnts   = nClauses() * learntsize_factor;

    if (nof_learnts < nbclausesbeforereduce) {
        nbclausesbeforereduce = (nof_learnts/2 < 5000) ? 5000 : nof_learnts/2;
    }
    lbool   status        = l_Undef;

    if (verbosity >= 1){
        reportf("============================[ Search Statistics ]==============================\n");
        reportf("| Conflicts |          ORIGINAL         |          LEARNT          | Progress |\n");
        reportf("|           |    Vars  Clauses Literals |    Limit  Clauses Lit/Cl |          |\n");
        reportf("===============================================================================\n");
    }

    // Search:
    while (status == l_Undef){
        if (verbosity >= 1)
            reportf("| %9d | %7d %8d %8d | %8d %8d %6.0f | %6.3f %% |\n", (int)conflicts, order_heap.size(), nClauses(), (int)clauses_literals, (int)nof_learnts, nLearnts(), (double)learnts_literals/nLearnts(), progress_estimate*100), fflush(stdout);
        status = search((int)nof_conflicts, (int)nof_learnts);
        nof_conflicts *= restart_inc;
        // LS: nof_learnts *= learntsize_inc; is now done inside reduceDB itself.
    }

    if (verbosity >= 1)
        reportf("===============================================================================\n");


    if (status == l_True){
        // Extend & copy model:
        model.growTo(nVars());
        for (int i = 0; i < nVars(); i++) model[i] = value(i);
#ifndef NDEBUG
        verifyModel();
#endif
    }else{
        assert(status == l_False);
        if (conflict.size() == 0)
            ok = false;
    }
#ifdef LS_STATS_NBBUMP
    for (int i = 0; i < learnts.size(); i++)
        printf("## %d %d %d\n", learnts[i]->size(), learnts[i]->activity(), (unsigned int)learnts[i]->nbBump());
#endif
    cancelUntil(0);
    return status == l_True;
}
Code Example #5
File: SimpSMTSolver.C  Project: aehyvari/OpenSMT2
lbool SimpSMTSolver::solve( const vec< Lit > & assumps
    , const unsigned conflicts
    , bool do_simp
    , bool turn_off_simp)
{
  vec<Var> extra_frozen;
  bool     result = true;

  if ( config.sat_preprocess_theory == 0 )
    goto skip_theory_preproc;

  opensmt_error( "preprocess theory has been temporarily disabled in this version" );

skip_theory_preproc:

  // Added Code
  //=================================================================================================

  do_simp &= use_simplification;

  if (do_simp)
  {
    // Assumptions must be temporarily frozen to run variable elimination:
    for (int i = 0; i < assumps.size(); i++)
    {
      Var v = var(assumps[i]);

      // If an assumption has been eliminated, remember it.
      if (isEliminated(v))
        remember(v);

      if (!frozen[v])
      {
        // Freeze and store.
        setFrozen(v, true);
        extra_frozen.push(v);
      }
    }

    result = eliminate(turn_off_simp);
  }

#ifdef STATISTICS
  CoreSMTSolver::preproc_time = cpuTime( );
#endif

  lbool lresult = l_Undef;
  if (result)
    lresult = CoreSMTSolver::solve(assumps, conflicts);
  else
    lresult = l_False;

  if (lresult == l_True)
  {
    extendModel();
    // Previous line
    // #ifndef NDEBUG
#ifndef SMTCOMP
    verifyModel();
#endif
  }

  if (do_simp)
    // Unfreeze the assumptions that were frozen:
    for (int i = 0; i < extra_frozen.size(); i++)
      setFrozen(extra_frozen[i], false);

  return lresult;
}
Code Example #6
File: MiniSATP.C  Project: dreal-deps/opensmt
bool MiniSATP::solve(const vec<Lit>& assumps)
{
    // Added Line
    initExpDup( );

    model.clear();
    conflict.clear();

    if (!ok) { 
      // Added Line
      doneExpDup( );

      return false;
    }

    assumps.copyTo(assumptions);

    double  nof_conflicts = restart_first;
    double  nof_learnts   = nClauses() * learntsize_factor;
    lbool   status        = l_Undef;

    if (verbosity >= 1){
        reportf("============================[ Search Statistics ]==============================\n");
        reportf("| Conflicts |          ORIGINAL         |          LEARNT          | Progress |\n");
        reportf("|           |    Vars  Clauses Literals |    Limit  Clauses Lit/Cl |          |\n");
        reportf("===============================================================================\n");
    }

    // Search:
    while (status == l_Undef){
        if (verbosity >= 1)
            reportf("| %9d | %7d %8d %8d | %8d %8d %6.0f | %6.3f %% |\n", (int)conflicts, order_heap.size(), nClauses(), (int)clauses_literals, (int)nof_learnts, nLearnts(), (double)learnts_literals/nLearnts(), progress_estimate*100), fflush(stdout);
        status = search((int)nof_conflicts, (int)nof_learnts);
        nof_conflicts *= restart_inc;
        nof_learnts   *= learntsize_inc;
    }

    if (verbosity >= 1)
        reportf("===============================================================================\n");


    if (status == l_True){
        // Extend & copy model:
        model.growTo(nVars());
        for (int i = 0; i < nVars(); i++) model[i] = value(i);
#ifndef NDEBUG
        verifyModel();
#endif
    }else{
        assert(status == l_False);
        if (conflict.size() == 0)
            ok = false;
    }

    cancelUntil(0);

    // cerr << "SOLVE: Memory after: " << memUsed( ) / 1024.0 / 1024.0 << endl;
    
    // Added Line
    doneExpDup( );
    assert( status == l_True || !explanation.empty( ) );

    return status == l_True;
}
Code Example #7
size_t Ransac::runRANSAC(const size_t maxIterationCount, const size_t minInlierCount, const double alarmRatio, const bool isProsacSampling, const double threshold)
{
	const size_t availableSampleSetSize = usedSampleSize_ > 0 ? std::max(usedSampleSize_, minimalSampleSize_) : minimalSampleSize_;
	if (totalSampleSize_ < availableSampleSetSize)
		return -1;

	if (isProsacSampling) sortSamples();

	size_t maxIteration = maxIterationCount;

	size_t inlierCount = 0;
	inlierFlags_.resize(totalSampleSize_, false);
	std::vector<bool> currInlierFlags(totalSampleSize_, false);

	std::vector<size_t> indices(availableSampleSetSize, -1);

	// TODO [check] >>
	//size_t prosacSampleCount = 10;
	size_t prosacSampleCount = availableSampleSetSize + 10;
	iteration_ = 0;
	while (iteration_ < maxIteration && inlierCount < minInlierCount)
	{
		// Draw a sample.
		if (isProsacSampling)
		{
			drawRandomSample(prosacSampleCount, availableSampleSetSize, true, indices);

			// This incrementing strategy is naive and simple but works just fine most of the time.
			if (prosacSampleCount < totalSampleSize_)
				++prosacSampleCount;
		}
		else drawRandomSample(totalSampleSize_, availableSampleSetSize, false, indices);

		// Estimate a model.
		if (estimateModel(indices) && verifyModel())
		{
			// Evaluate a model.
			const size_t currInlierCount = lookForInliers(currInlierFlags, threshold);

			if (currInlierCount > inlierCount)
			{
				const double inlierRatio = double(currInlierCount) / totalSampleSize_;
				const size_t newMaxIteration = (size_t)std::floor(std::log(alarmRatio) / std::log(1.0 - std::pow(inlierRatio, (double)availableSampleSetSize)));
				if (newMaxIteration < maxIteration) maxIteration = newMaxIteration;

				inlierCount = currInlierCount;
				inlierFlags_.swap(currInlierFlags);
			}
		}

		++iteration_;
	}

	// Re-estimate with all inliers and loop until the number of inliers does not increase anymore.
	if (inlierCount >= minimalSampleSize_)
	{
		size_t oldInlierCount = inlierCount;
		do
		{
			if (!estimateModelFromInliers()) return -1;

			oldInlierCount = inlierCount;
			inlierCount = lookForInliers(inlierFlags_, threshold);
		} while (inlierCount > oldInlierCount);

		inlierCount = lookForInliers(inlierFlags_, threshold);
	}

	return inlierCount;
}
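
The maxIteration update inside the loop is the standard adaptive RANSAC bound: if a fraction w of the samples are inliers and a minimal sample contains m points, the probability that none of N random samples is all-inlier is (1 - w^m)^N, so keeping this failure probability at or below alarmRatio requires roughly N = log(alarmRatio) / log(1 - w^m) iterations. A standalone sketch of that computation, with illustrative names that are not part of the Ransac class above:

#include <cmath>
#include <cstddef>
#include <limits>

// Adaptive RANSAC iteration budget, as recomputed in runRANSAC whenever a
// better consensus set is found.
std::size_t adaptiveIterationBound(double inlierRatio, std::size_t sampleSize, double alarmRatio)
{
	const double denom = std::log(1.0 - std::pow(inlierRatio, (double)sampleSize));
	if (denom >= 0.0)  // inlierRatio == 0: no informative bound
		return std::numeric_limits<std::size_t>::max();
	return (std::size_t)std::floor(std::log(alarmRatio) / denom);
}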
Code Example #8
size_t Ransac::runMLESAC(const size_t maxIterationCount, const size_t minInlierCount, const double alarmRatio, const bool isProsacSampling, const double inlierSquaredStandardDeviation, const double inlierThresholdProbability, const size_t maxEMIterationCount)
{
	const size_t availableSampleSetSize = usedSampleSize_ > 0 ? std::max(usedSampleSize_, minimalSampleSize_) : minimalSampleSize_;
	if (totalSampleSize_ < availableSampleSetSize)
		return -1;

	if (isProsacSampling) sortSamples();

	size_t maxIteration = maxIterationCount;

	size_t inlierCount = 0;
	inlierFlags_.resize(totalSampleSize_, false);
	std::vector<double> inlierProbs(totalSampleSize_, 0.0);
	double minNegativeLogLikelihood = std::numeric_limits<double>::max();

	std::vector<size_t> indices(availableSampleSetSize, -1);

	// TODO [check] >>
	//size_t prosacSampleCount = 10;
	size_t prosacSampleCount = availableSampleSetSize + 10;
	iteration_ = 0;
	const double& eps = swl::MathConstant::EPS;
	while (iteration_ < maxIteration && inlierCount < minInlierCount)
	{
		// Draw a sample.
		if (isProsacSampling)
		{
			drawRandomSample(prosacSampleCount, availableSampleSetSize, true, indices);

			// This incrementing strategy is naive and simple but works just fine most of the time.
			if (prosacSampleCount < totalSampleSize_)
				++prosacSampleCount;
		}
		else drawRandomSample(totalSampleSize_, availableSampleSetSize, false, indices);

		// Estimate a model.
		if (estimateModel(indices) && verifyModel())
		{
			// Compute inliers' probabilities.
			computeInlierProbabilities(inlierProbs, inlierSquaredStandardDeviation);

			// EM algorithm.
			const double tol = swl::MathConstant::TOL_5;

			double gamma = 0.5, prevGamma;
			for (size_t i = 0; i < maxEMIterationCount; ++i)
			{
				const double outlierProb = (1.0 - gamma) * inlierThresholdProbability;
				double sumInlierProb = 0.0;
				for (size_t k = 0; k < totalSampleSize_; ++k)
				{
					const double inlierProb = gamma * inlierProbs[k];
					sumInlierProb += inlierProb / (inlierProb + outlierProb);
				}

				prevGamma = gamma;
				gamma = sumInlierProb / totalSampleSize_;

				if (std::abs(gamma - prevGamma) < tol) break;
			}

			// Evaluate a model.
			const double outlierProb = (1.0 - gamma) * inlierThresholdProbability;
			double negativeLogLikelihood = 0.0;
			for (size_t k = 0; k < totalSampleSize_; ++k)
				negativeLogLikelihood -= std::log(gamma * inlierProbs[k] + outlierProb);  // Negative log likelihood.

			if (negativeLogLikelihood < minNegativeLogLikelihood)
			{
				const double denom = std::log(1.0 - std::pow(gamma, (double)availableSampleSetSize));
				if (std::abs(denom) > eps)
				{
					const size_t newMaxIteration = (size_t)std::floor(std::log(alarmRatio) / denom);
					if (newMaxIteration < maxIteration) maxIteration = newMaxIteration;
				}

				inlierCount = lookForInliers(inlierFlags_, inlierProbs, inlierThresholdProbability);

				minNegativeLogLikelihood = negativeLogLikelihood;
			}
		}

		++iteration_;
	}

	// Re-estimate with all inliers and loop until the number of inliers does not increase anymore.
	if (inlierCount >= minimalSampleSize_)
	{
		size_t oldInlierCount = 0;
		do
		{
			if (!estimateModelFromInliers()) return inlierCount;

			// Compute inliers' probabilities.
			computeInlierProbabilities(inlierProbs, inlierSquaredStandardDeviation);

			oldInlierCount = inlierCount;
			inlierCount = lookForInliers(inlierFlags_, inlierProbs, inlierThresholdProbability);
		} while (inlierCount > oldInlierCount);

		// Compute inliers' probabilities.
		computeInlierProbabilities(inlierProbs, inlierSquaredStandardDeviation);

		inlierCount = lookForInliers(inlierFlags_, inlierProbs, inlierThresholdProbability);
	}

	return inlierCount;
}
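
The inner loop of runMLESAC is the EM re-estimation of the mixing parameter gamma from MLESAC (Torr and Zisserman): each sample is modelled as drawn from a two-component mixture gamma * p_inlier + (1 - gamma) * p_outlier, the E-step computes the posterior probability that each sample is an inlier, and the M-step sets gamma to the mean of those posteriors. A standalone sketch of that update, with illustrative names (outlierDensity plays the role of inlierThresholdProbability above):

#include <cmath>
#include <cstddef>
#include <vector>

// EM estimate of the inlier mixing ratio gamma for a fixed model,
// mirroring the inner loop of runMLESAC.
double estimateInlierMixingRatio(const std::vector<double>& inlierProbs,
                                 double outlierDensity,
                                 std::size_t maxEMIterations,
                                 double tol)
{
	double gamma = 0.5;  // initial guess
	for (std::size_t it = 0; it < maxEMIterations; ++it)
	{
		const double outlierProb = (1.0 - gamma) * outlierDensity;
		double sumResponsibility = 0.0;
		for (std::size_t k = 0; k < inlierProbs.size(); ++k)
		{
			const double inlierProb = gamma * inlierProbs[k];
			sumResponsibility += inlierProb / (inlierProb + outlierProb);  // E-step
		}
		const double newGamma = sumResponsibility / inlierProbs.size();  // M-step
		if (std::abs(newGamma - gamma) < tol)
			break;
		gamma = newGamma;
	}
	return gamma;
}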
Code Example #9
/*
    construct sphere-tree for the model
*/
bool constructTree(const boost::filesystem::path& input_file,
                   bool toYAML)
{
  boost::filesystem::path output_file
    = input_file.parent_path () / boost::filesystem::basename (input_file);

  if (toYAML)
    output_file += "-spawn.yml";
  else
    output_file += "-spawn.sph";

  printf("Input file: %s\n", input_file.c_str ());
  printf("Output file: %s\n\n", output_file.c_str ());


  /*
      load the surface model
  */
  Surface sur;

  bool loaded = false;
  std::string extension = boost::algorithm::to_lower_copy (input_file.extension ().string ());
  if (extension == ".obj")
    loaded = loadOBJ(&sur, input_file.c_str ());
  else
    loaded = sur.loadSurface(input_file.c_str ());

  if (!loaded){
    printf("ERROR : Unable to load input file (%s)\n\n", input_file.c_str ());
    return false;
    }

  /*
      scale box
  */
  // FIXME: Disable scaling for now (wrong result if a transformation is applied after)
  //float boxScale = sur.fitIntoBox(1000);
  float boxScale = 1.;

  /*
      make medial tester
  */
  MedialTester mt;
  mt.setSurface(sur);
  mt.useLargeCover = true;

  /*
      setup evaluator
  */
  SEConvex convEval;
  convEval.setTester(mt);
  SEBase *eval = &convEval;

  Array<Point3D> sphPts;
  SESphPt sphEval;
  if (testerLevels > 0){   //  <= 0 will use convex tester
    SSIsohedron::generateSamples(&sphPts, testerLevels-1);
    sphEval.setup(mt, sphPts);
    eval = &sphEval;
    printf("Using concave tester (%d)\n\n", sphPts.getSize());
    }

  /*
      verify model
  */
  if (verify){
    bool ok = verifyModel(sur);
    if (!ok){
      printf("ERROR : model is not usable\n\n");
      return false;
      }
    }

  /*
      setup for the set of cover points
  */
  Array<Surface::Point> coverPts;
  MSGrid::generateSamples(&coverPts, numCoverPts, sur, TRUE, minCoverPts);
  printf("%d cover points\n", coverPts.getSize());

  /*
      setup SPAWN algorithm
  */
  SRSpawn spawn;
  spawn.setup(mt);
  spawn.useIterativeSelect = false;
  spawn.eval = eval;

  /*
      setup SphereTree constructor - using dynamic construction
  */
  STGGeneric treegen;
  treegen.eval = eval;
  treegen.useRefit = true;
  treegen.setSamples(coverPts);
  treegen.reducer = &spawn;

  /*
      make sphere-tree
  */
  SphereTree tree;
  tree.setupTree(branch, depth+1);

  waitForKey();
  treegen.constructTree(&tree);

  /*
     save sphere-tree
  */
  if (tree.saveSphereTree(output_file, 1.0f/boxScale)){
    Array<LevelEval> evals;
    if (eval){
      evaluateTree(&evals, tree, eval);
      writeEvaluation(stdout, evals);
      }

    if (!yaml)
    {
      FILE *f = fopen(output_file.c_str (), "a");
      if (f){
        fprintf(f, "\n\n");
        fprintf(f, "Options : \n");
        writeParam(f, intParams);
        writeParam(f, boolParams);
        fprintf(f, "\n\n");
        writeEvaluation(f, evals);
        fclose(f);
      }
    }

    return true;
    }
  else{
    return false;
    }
}