Example 1
    void
    intervalsTest() {
        static const uint DIM = 2;
        double minA = -2;
        double minB = 2;
        auto spline1 = SplineT2(minA, minA+3);
        auto spline2 = SplineT2(minB, minB+3);
        std::array<SplineT2, DIM> splines;
        splines[0] = spline1;
        splines[1] = spline2;
        auto e1 = spline1.intervals();
        auto e2 = spline2.intervals();
        Unknown<DIM, ValueT, SplineT2>
                unknown(1.0, splines, 1);
        auto intervals = unknown.intervals();

        EXPECT_TRUE(intervals.size() == 9);
        for (int i = 0; i < 3; i++) {
            for (int j = 0; j < 3; j++) {
                EXPECT_EQ(minA+0 + i, intervals[j+3*i].minima()[0]);
                EXPECT_EQ(minA+1 + i, intervals[j+3*i].maxima()[0]);
                EXPECT_EQ(minB+0 + j, intervals[j+3*i].minima()[1]);
                EXPECT_EQ(minB+1 + j, intervals[j+3*i].maxima()[1]);
            }
        }
    }
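
The assertions above index the result as intervals[j + 3*i], i.e. a row-major layout of the 3 x 3 Cartesian product of the two splines' unit intervals. A minimal self-contained sketch of that enumeration (plain structs standing in for the original Unknown/Spline types, which are assumptions here, not the tested API):

#include <cstdio>
#include <vector>

int main() {
    const double minA = -2, minB = 2;              // same anchors as the test
    struct Box { double min0, max0, min1, max1; };
    std::vector<Box> cells;
    // Row-major: dimension 0 (spline1) is the slow index, dimension 1 (spline2)
    // the fast one, so cell (i, j) lands at position j + 3*i.
    for (int i = 0; i < 3; ++i)
        for (int j = 0; j < 3; ++j)
            cells.push_back({minA + i, minA + i + 1, minB + j, minB + j + 1});
    std::printf("%zu cells, cell[4] = [%g,%g] x [%g,%g]\n", cells.size(),
                cells[4].min0, cells[4].max0, cells[4].min1, cells[4].max1);
    return 0;
}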
Example 2
    /**
     * Get the minimum and maximum of the workspace data. Code essentially copied from SignalRange.cpp
     * @param workspace Reference to an IMD workspace
     * @returns The minimum and maximum value of the workspace dataset.
     */
    QwtDoubleInterval MetaDataExtractorUtils::getMinAndMax(Mantid::API::IMDWorkspace_sptr workspace)
    {
      if (!workspace)
        throw std::invalid_argument("The workspace is empty.");

      auto iterators = workspace->createIterators(PARALLEL_GET_MAX_THREADS, 0);

      std::vector<QwtDoubleInterval> intervals(iterators.size());
      // cppcheck-suppress syntaxError
      PRAGMA_OMP( parallel for schedule(dynamic, 1))
      for (int i=0; i < int(iterators.size()); i++)
      {
        Mantid::API::IMDIterator * it = iterators[i];

        QwtDoubleInterval range = this->getRange(it);
        intervals[i] = range;
        // don't delete iterator in parallel. MSVC doesn't like it
        // when the iterator points to a mock object.
      }

      // Combine the overall min/max
      double minSignal = DBL_MAX;
      double maxSignal = -DBL_MAX;

      auto inf = std::numeric_limits<double>::infinity();
      for (size_t i=0; i < iterators.size(); i++)
      {
        delete iterators[i];
        
        double signal;
        signal = intervals[i].minValue();
        if (signal != inf && signal < minSignal) minSignal = signal;

        signal = intervals[i].maxValue();
        if (signal != inf && signal > maxSignal) maxSignal = signal;
      }

      // No finite signal was found; fall back to the default range.
      if (minSignal == DBL_MAX)
      {
        minSignal = defaultMin;
        maxSignal = defaultMax;
      } 

      QwtDoubleInterval minMaxContainer;

      if (minSignal < maxSignal)
        minMaxContainer = QwtDoubleInterval(minSignal, maxSignal);
      else
      {
        if (minSignal != 0)
          // Possibly only one value in range
          minMaxContainer = QwtDoubleInterval(minSignal*0.5, minSignal*1.5);
        else
          // Other default value
          minMaxContainer = QwtDoubleInterval(defaultMin, defaultMax);
      }

      return minMaxContainer;
    }
Example 3
int main (int argc, char *argv[]) 
{
    if (!preprocessCommands(&argc, argv, NULL, NULL)) {
        xexit(0);
    }

    printf("Resource Reading...\n");
    ResourceSource* res = setupParameters(true, &argc, argv);

    ConstData constData(*res);
    Intervals intervals(*res);
    BeamParams beamParams(*res);

    FILE* massOut = openOutDataFile(*res, "mass_out");
    FILE* crossSectionOut = openOutDataFile(*res, "total_cross_section_out");
    printFileHeaders(massOut, crossSectionOut);

    printf("Main loop...\n");
    DataSeparator massSeparator(massOut), csSeparator(crossSectionOut);
    loopMassMuTan(constData, intervals, beamParams, massSeparator, csSeparator,
                  massOut, crossSectionOut);
    printf("Resource releasing...\n");

    fclose(crossSectionOut);
    fclose(massOut);

    delete res;

    printf("Done...\n");
    xexit(0);

}
Example 4
static void apply_paint_patheffect(const SkPaint& paint, Json::Value* target, bool sendBinaries) {
    SkPathEffect* pathEffect = paint.getPathEffect();
    if (pathEffect != nullptr) {
        SkPathEffect::DashInfo dashInfo;
        SkPathEffect::DashType dashType = pathEffect->asADash(&dashInfo);
        if (dashType == SkPathEffect::kDash_DashType) {
            dashInfo.fIntervals = (SkScalar*) sk_malloc_throw(dashInfo.fCount * sizeof(SkScalar));
            pathEffect->asADash(&dashInfo);
            Json::Value dashing(Json::objectValue);
            Json::Value intervals(Json::arrayValue);
            for (int32_t i = 0; i < dashInfo.fCount; i++) {
                intervals.append(Json::Value(dashInfo.fIntervals[i]));
            }
            sk_free(dashInfo.fIntervals);  // pair with sk_malloc_throw above
            dashing[SKJSONCANVAS_ATTRIBUTE_INTERVALS] = intervals;
            dashing[SKJSONCANVAS_ATTRIBUTE_PHASE] = dashInfo.fPhase;
            (*target)[SKJSONCANVAS_ATTRIBUTE_DASHING] = dashing;
        }
        else {
            Json::Value jsonPathEffect;
            flatten(pathEffect, &jsonPathEffect, sendBinaries);
            (*target)[SKJSONCANVAS_ATTRIBUTE_PATHEFFECT] = jsonPathEffect;
        }
    }
}
Example 5
int main()
{
  Interval_skip_list isl;
  int i, n, d;

  n = 10;
  d = 3;
  //std::cin >> n >> d;
  std::vector<Interval> intervals(n);
  for(i = 0; i < n; i++) {
    intervals[i] = Interval(i, i+d);
  }
  std::random_shuffle(intervals.begin(), intervals.end());

  isl.insert(intervals.begin(), intervals.end());

  for(i = 0; i < n+d; i++) {
    std::list<Interval> L;
    isl.find_intervals(i, std::back_inserter(L));
    for(std::list<Interval>::iterator it = L.begin(); it != L.end(); it++){
      std::cout << *it;
    }
    std::cout << std::endl;
  }

  for(i = 0; i < n; i++) {
    isl.remove(intervals[i]);
  }
  return 0;

}
Example 6
SkFlattenable* SkDashPathEffect::CreateProc(SkReadBuffer& buffer) {
    const SkScalar phase = buffer.readScalar();
    uint32_t count = buffer.getArrayCount();
    SkAutoSTArray<32, SkScalar> intervals(count);
    if (buffer.readScalarArray(intervals.get(), count)) {
        return Create(intervals.get(), SkToInt(count), phase);
    }
    return nullptr;
}
Example 7
int main(int argc, const char *argv[]) {
    auto test = [](std::vector<Interval> vi) {
        Solution solution;
        std::vector<Interval> intervals(vi);
        auto result = solution.merge(intervals);
        std::copy(result.begin(), result.end(), std::ostream_iterator<Interval>(std::cout, ", "));
    };
    
    test({Interval(15, 18), Interval(8, 10), Interval(1, 3), Interval(2, 6)});
    
    return 0;
}
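
Example 7 exercises Solution::merge without showing its body. For reference, here is a minimal self-contained sketch of the usual sort-and-sweep merge (the Interval layout and function name are assumptions, not the original code):

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

struct Interval { int start, end; };   // assumed layout, not the original class

// Sort by start, then sweep, extending the last merged interval on overlap.
std::vector<Interval> mergeIntervals(std::vector<Interval> intervals) {
    if (intervals.empty()) return intervals;
    std::sort(intervals.begin(), intervals.end(),
              [](const Interval& a, const Interval& b) { return a.start < b.start; });
    std::vector<Interval> merged{intervals.front()};
    for (std::size_t i = 1; i < intervals.size(); ++i) {
        if (intervals[i].start <= merged.back().end)
            merged.back().end = std::max(merged.back().end, intervals[i].end);
        else
            merged.push_back(intervals[i]);
    }
    return merged;
}

int main() {
    for (const Interval& iv : mergeIntervals({{15, 18}, {8, 10}, {1, 3}, {2, 6}}))
        std::cout << "[" << iv.start << "," << iv.end << "] ";   // prints [1,6] [8,10] [15,18]
    std::cout << std::endl;
    return 0;
}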
Example 8
int main(int argc, char **argv)
{
    QApplication a(argc, argv);

    QwtPlot plot;
    plot.setCanvasBackground(QColor(Qt::white));
    plot.setTitle("Histogram");

    QwtPlotGrid *grid = new QwtPlotGrid;
    grid->enableXMin(true);
    grid->enableYMin(true);
    grid->setMajPen(QPen(Qt::black, 0, Qt::DotLine));
    grid->setMinPen(QPen(Qt::gray, 0 , Qt::DotLine));
    grid->attach(&plot);

    HistogramItem *histogram = new HistogramItem();
    histogram->setColor(Qt::darkCyan);

    const int numValues = 20;

    QwtArray<QwtDoubleInterval> intervals(numValues);
    QwtArray<double> values(numValues);

    double pos = 0.0;
    for ( int i = 0; i < (int)intervals.size(); i++ )
    {
        const int width = 5 + rand() % 15;
        const int value = rand() % 100;

        intervals[i] = QwtDoubleInterval(pos, pos + double(width));
        values[i] = value; 

        pos += width;
    }

    histogram->setData(QwtIntervalData(intervals, values));
    histogram->attach(&plot);

    plot.setAxisScale(QwtPlot::yLeft, 0.0, 100.0);
    plot.setAxisScale(QwtPlot::xBottom, 0.0, pos);
    plot.replot();

#if QT_VERSION < 0x040000
    a.setMainWidget(&plot);
#endif

    plot.resize(600,400);
    plot.show();

    return a.exec(); 
}
Example 9
sk_sp<SkFlattenable> SkDashImpl::CreateProc(SkReadBuffer& buffer) {
    const SkScalar phase = buffer.readScalar();
    uint32_t count = buffer.getArrayCount();

    // Don't allocate gigantic buffers if there's not data for them.
    if (count > buffer.size() / sizeof(SkScalar)) {
        return nullptr;
    }

    SkAutoSTArray<32, SkScalar> intervals(count);
    if (buffer.readScalarArray(intervals.get(), count)) {
        return SkDashPathEffect::Make(intervals.get(), SkToInt(count), phase);
    }
    return nullptr;
}
Example 10
int main( int argc, char **argv )
{
  //using HistogramItem = QwtPlotItem;
  using HistogramItem = QwtPlotHistogram;
  //using QwtIntervalData = QwtSeriesData<QwtIntervalSample>;

  QApplication a(argc, argv);
  QwtPlot plot;
  plot.setCanvasBackground(QColor(Qt::white));
  plot.setTitle("Histogram");
  QwtPlotGrid *grid = new QwtPlotGrid;
  grid->enableXMin(true);
  grid->enableYMin(true);

  grid->setMajorPen(QPen(Qt::black, 0, Qt::DotLine));
  grid->setMinorPen(QPen(Qt::gray, 0 , Qt::DotLine));
  grid->attach(&plot);
  HistogramItem *histogram = new HistogramItem;
  //histogram->setColor(Qt::darkCyan);
  const int numValues = 20;
  //QwtArray<QwtDoubleInterval> intervals(numValues);
  QwtArray<QwtIntervalSample> intervals(numValues);
  QwtArray<double> values(numValues);
  double pos = 0.0;
  for ( int i = 0; i < (int)intervals.size(); i++ )
  {
    const int width = 5 + rand() % 15;
    const int value = rand() % 100;
    //intervals[i] = QwtDoubleInterval(pos, pos + double(width));
    intervals[i] = QwtIntervalSample(value, pos, pos + double(width));
    //values[i] = value;
    pos += width;
  }

  //histogram->setData(QwtIntervalData(intervals, values));
  histogram->setSamples(intervals);
  //histogram->setSamples(QwtIntervalData(intervals, values));
  //QwtIntervalData d;
  //histogram->setData(d);
  histogram->attach(&plot);
  plot.setAxisScale(QwtPlot::yLeft, 0.0, 100.0);
  plot.setAxisScale(QwtPlot::xBottom, 0.0, pos);
  plot.replot();
  plot.resize(600,400);
  plot.show();
  return a.exec();
}
Example 11
Output_args initializeOutputStreams(string & name_prefix, bool seq_only, 
		bool discard_secondary_alignments, Packet_courier * courier) {
	shared_ptr<ofstream> intervals(new ofstream("genomic_intervals.txt"));
	Output_args oa(seq_only);
	oa.offsets_buf = shared_ptr<OutputBuffer>(new OutputBuffer(courier, intervals, name_prefix, ".offs.lz", 1<<22, 20) );
	oa.edits_buf = shared_ptr<OutputBuffer>(new OutputBuffer(courier, intervals, name_prefix, ".edits.lz" ) );
	// set dictionary size to be small -- this is a barely compressible stream, so we won't try hard
	oa.has_edits_buf = shared_ptr<OutputBuffer>(new OutputBuffer(courier, intervals, name_prefix, ".has_edits.lz", 1 << 20, 5 ) );
	oa.left_clips_buf = shared_ptr<OutputBuffer>(new OutputBuffer(courier, intervals, name_prefix, ".left_clip.lz", 1<<22, 20) );
	oa.right_clips_buf = shared_ptr<OutputBuffer>(new OutputBuffer(courier, intervals, name_prefix, ".right_clip.lz", 1<<22, 20) );
	oa.unaligned_buf = shared_ptr<OutputBuffer>(new OutputBuffer(courier, name_prefix, ".unaligned.lz", 3<<20, 12 ) );
	if (!seq_only) {
		oa.flags_buf = shared_ptr<OutputBuffer>(new OutputBuffer(courier, intervals, name_prefix, ".flags.lz" ) );
		oa.ids_buf = shared_ptr<OutputBuffer>(new OutputBuffer(courier, intervals, name_prefix, ".ids.lz", 3 << 20,  12 ) );
		oa.opt_buf = shared_ptr<OutputBuffer>(new OutputBuffer(courier, intervals, name_prefix, ".opt.lz" ) );
		oa.quals_buf = shared_ptr<QualityCompressor>(new QualityCompressor(courier, intervals, name_prefix.c_str(), 0.05, 200000, 4 ) );
	}
	return oa;
};
Example 12
GrStrokeInfo TestStrokeInfo(SkRandom* random) {
    SkStrokeRec::InitStyle style =
            SkStrokeRec::InitStyle(random->nextULessThan(SkStrokeRec::kFill_InitStyle + 1));
    GrStrokeInfo strokeInfo(style);
    randomize_stroke_rec(&strokeInfo, random);
    SkPathEffect::DashInfo dashInfo;
    dashInfo.fCount = random->nextRangeU(1, 50) * 2;
    SkAutoTDeleteArray<SkScalar> intervals(SkNEW_ARRAY(SkScalar, dashInfo.fCount));
    dashInfo.fIntervals = intervals.get();
    SkScalar sum = 0;
    for (int i = 0; i < dashInfo.fCount; i++) {
        dashInfo.fIntervals[i] = random->nextRangeScalar(SkDoubleToScalar(0.01),
                                                         SkDoubleToScalar(10.0));
        sum += dashInfo.fIntervals[i];
    }
    dashInfo.fPhase = random->nextRangeScalar(0, sum);
    strokeInfo.setDashInfo(dashInfo);
    return strokeInfo;
}
Example 13
void TestStyle(SkRandom* random, GrStyle* style) {
    SkStrokeRec::InitStyle initStyle =
            SkStrokeRec::InitStyle(random->nextULessThan(SkStrokeRec::kFill_InitStyle + 1));
    SkStrokeRec stroke(initStyle);
    randomize_stroke_rec(&stroke, random);
    sk_sp<SkPathEffect> pe;
    if (random->nextBool()) {
        int cnt = random->nextRangeU(1, 50) * 2;
        std::unique_ptr<SkScalar[]> intervals(new SkScalar[cnt]);
        SkScalar sum = 0;
        for (int i = 0; i < cnt; i++) {
            intervals[i] = random->nextRangeScalar(SkDoubleToScalar(0.01),
                                                   SkDoubleToScalar(10.0));
            sum += intervals[i];
        }
        SkScalar phase = random->nextRangeScalar(0, sum);
        pe = TestDashPathEffect::Make(intervals.get(), cnt, phase);
    }
    *style = GrStyle(stroke, std::move(pe));
}
Example 14
/*////////////////////////////////////////////////////////////////
Parse genomic coordinates file, return a map of vectors of intervals
grouped by type of data stream
////////////////////////////////////////////////////////////////*/
unordered_map<string,shared_ptr<vector<TrueGenomicInterval>>>
	parseGenomicIntervals(string const & fname) {
	ifstream f_in(fname);
	check_file_open(f_in, fname);
	string line;
	unordered_map<string, shared_ptr<vector<TrueGenomicInterval>> > map;
	while ( getline(f_in, line) ) {
		auto space = line.find(' ');
		string suffix = line.substr(0, space);
		if (map.find(suffix) == map.end()) {
			shared_ptr<vector<TrueGenomicInterval>> intervals(new vector<TrueGenomicInterval>());
			map[suffix] = intervals;
		}
		auto second_space = line.find(' ', space + 1);
		unsigned long num_alignments = stoul(line.substr(space + 1, second_space - space - 1));
		map[suffix]->emplace_back( line.substr(second_space + 1), num_alignments );
	}
	f_in.close();
	return map;
}
Example 15
template <class T>
void GenomicRegionCollection<T>::MergeOverlappingIntervals() {

  // make the list
  std::list<T> intervals(m_grv->begin(), m_grv->end());

  intervals.sort();
  typename std::list<T>::iterator inext(intervals.begin());
  ++inext;
  for (typename std::list<T>::iterator i(intervals.begin()), iend(intervals.end()); inext != iend;) {
    // change >= to > to not overlap touching intervals (eg [4,5][5,6])
    if ((i->pos2 >= inext->pos1) && (i->chr == inext->chr)) {
      if (i->pos2 >= inext->pos2) {
        intervals.erase(inext++);
      } else if (i->pos2 < inext->pos2) {
        i->pos2 = inext->pos2;
        intervals.erase(inext++);
      }
    } else {
      ++i;
      ++inext;
    }
  }

  // move it over to a grv
  m_grv->clear(); // clear the old data 

  // (a C++11 move-iterator construction or an append could be used here instead)
  m_grv->insert(m_grv->end(), intervals.begin(), intervals.end());

  // clear the old interval tree
  m_tree->clear();
}
Example 16
std::pair<int,double> slice_sample_multi(vector<double>& X0, vector<slice_function*>& g, double w, int m)
{
  int N = g.size();

  double g1x0 = (*g[0])();

  assert(std::abs(g1x0 - (*g[0])(X0[0])) < 1.0e-9);

  // Determine the slice level, in log terms.

  double logy = g1x0 - exponential(1);

  // Find the initial interval to sample from - in G[0]

  vector<std::pair<double,double> > intervals(N);

  intervals[0] = find_slice_boundaries_stepping_out(X0[0],*g[0],logy,w,m);

  for(int i=1;i<N;i++)
    intervals[i] = find_slice_boundaries_search(X0[i],*g[i],logy,w,m);

  // Sample from the intervals, shrinking them on each rejection
  return search_multi_intervals(X0,intervals,g,logy);
}
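
A note on the slice level above: the auxiliary slice height is u = U * f(x0) with U ~ Uniform(0,1); in log terms log u = log f(x0) + log U, and since -log U has an Exponential(1) distribution, this is exactly the logy = g1x0 - exponential(1) line in the code.

The function also relies on find_slice_boundaries_stepping_out, which is not shown. As a point of reference, a hedged self-contained sketch of Neal-style "stepping out" (not the project's implementation; the signature and use of std::function are assumptions):

#include <cstdlib>
#include <functional>
#include <utility>

// Expand an interval of width w around x0 until both ends fall below the slice
// level logy, spending at most m-1 expansion steps split randomly between sides.
std::pair<double, double> step_out(double x0, const std::function<double(double)>& logf,
                                   double logy, double w, int m) {
    double u = (double)std::rand() / RAND_MAX;
    double L = x0 - w * u;
    double R = L + w;
    int j = std::rand() % m;          // step budget for the left side
    int k = m - 1 - j;                // remaining budget for the right side
    while (j-- > 0 && logf(L) > logy) L -= w;
    while (k-- > 0 && logf(R) > logy) R += w;
    return {L, R};
}

In the snippet above this corresponds to the initial interval for dimension 0; the remaining dimensions use the search variant instead.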
Example 17
  //Accepts a query, calls the SAT solver and generates Valid/Invalid.
  //If it returns 0 the input is INVALID, if it returns 1 the input is
  //VALID, and if it returns 2 the result is UNDECIDED.
  SOLVER_RETURN_TYPE
  STP::TopLevelSTPAux(SATSolver& NewSolver, const ASTNode& original_input)
  {
    bm->ASTNodeStats("input asserts and query: ", original_input);

    DifficultyScore difficulty;
    if (bm->UserFlags.stats_flag)
            cerr << "Difficulty Initially:" << difficulty.score(original_input) << endl;

    // A heap object so I can easily control its lifetime.
    std::auto_ptr<BVSolver> bvSolver(new BVSolver(bm, simp));
    std::auto_ptr<PropagateEqualities> pe (new PropagateEqualities(simp,bm->defaultNodeFactory,bm));

    ASTNode simplified_solved_InputToSAT = original_input;

    // If the number of array reads is small, we rewrite them through.
    // The bit-vector simplifications are more thorough than the array simplifications. For example,
    // we don't currently do unconstrained elimination on arrays, but we do for bit-vectors.
    // A better way to do this would be to estimate the number of axioms introduced.
    // TODO: The number of reads at which we perform this operation was chosen arbitrarily.
    bool removed = false;
    if (((bm->UserFlags.ackermannisation && numberOfReadsLessThan(simplified_solved_InputToSAT,50)) || bm->UserFlags.isSet("upfront-ack", "0"))
        || numberOfReadsLessThan(simplified_solved_InputToSAT,10)
    )
      {
              // If the number of axioms that would be added is small, remove them.
              bm->UserFlags.ackermannisation = true;
              simplified_solved_InputToSAT = arrayTransformer->TransformFormula_TopLevel(simplified_solved_InputToSAT);
              if (bm->UserFlags.stats_flag)
                cerr << "Have removed array operations" << endl;
              removed = true;
      }

    const bool arrayops = containsArrayOps(simplified_solved_InputToSAT);
    if (removed)
      assert(!arrayops);

    // Run size reducing just once.
    simplified_solved_InputToSAT = sizeReducing(simplified_solved_InputToSAT, bvSolver.get(),pe.get());

    unsigned initial_difficulty_score = difficulty.score(simplified_solved_InputToSAT);

    int bitblasted_difficulty = -1;

    // Run it to a fixed point if it's not too difficult.
    // Currently we discard all the state each time sizeReducing is called,
    // so it's expensive to call.
    if ((!arrayops && initial_difficulty_score < 1000000) || bm->UserFlags.isSet("preserving-fixedpoint", "0"))
           simplified_solved_InputToSAT = callSizeReducing(simplified_solved_InputToSAT, bvSolver.get(),pe.get(), initial_difficulty_score, bitblasted_difficulty);

    if ((!arrayops || bm->UserFlags.isSet("array-difficulty-reversion", "1")))
      {
        initial_difficulty_score = difficulty.score(simplified_solved_InputToSAT);
      }

    if (bitblasted_difficulty != -1 && bm->UserFlags.stats_flag)
      std::cout << "Initial Bitblasted size:" << bitblasted_difficulty << endl;


    if (bm->UserFlags.stats_flag)
      std::cout << "Difficulty After Size reducing:" << initial_difficulty_score << endl;

    // So we can delete the object and release all the hash-buckets storage.
    std::auto_ptr<Revert_to> revert(new Revert_to());

    if ((!arrayops || bm->UserFlags.isSet("array-difficulty-reversion", "1")))
      {
        revert->initialSolverMap.insert(simp->Return_SolverMap()->begin(), simp->Return_SolverMap()->end());
        revert->backup_arrayToIndexToRead.insert(arrayTransformer->arrayToIndexToRead.begin(),arrayTransformer->arrayToIndexToRead.end());
        revert->toRevertTo = simplified_solved_InputToSAT;
      }

    ASTNode inputToSAT;

    //Round of substitution, solving, and simplification. Ensures that the
    //DAG is minimized as much as possible, and ideally should
    //guarantee that all like terms in BVPLUSes have been combined.
    bm->SimplifyWrites_InPlace_Flag = false;
    //bm->Begin_RemoveWrites = false;
    //bm->start_abstracting = false;
    bm->TermsAlreadySeenMap_Clear();
    do
      {
        inputToSAT = simplified_solved_InputToSAT;

        if (bm->soft_timeout_expired)
            return SOLVER_TIMEOUT;

        if (bm->UserFlags.optimize_flag)
          {
            simplified_solved_InputToSAT = pe->topLevel(simplified_solved_InputToSAT, arrayTransformer);

            // Imagine:
            // The simplifier simplifies (0 + T) to T
            // Then bvsolve introduces (0 + T)
            // Then CreateSubstitutionMap decides T maps to a constant, but leaving another (0+T).
            // When we go to simplify (0 + T) will still be in the simplify cache, so will be mapped to T.
            // But it shouldn't be T, it should be a constant.
            // Applying the substitution map fixes this case.
            //
            if (simp->hasUnappliedSubstitutions())
              {
                simplified_solved_InputToSAT = simp->applySubstitutionMap(simplified_solved_InputToSAT);
                simp->haveAppliedSubstitutionMap();
              }

            bm->ASTNodeStats(pe_message.c_str(), simplified_solved_InputToSAT);

            simplified_solved_InputToSAT = simp->SimplifyFormula_TopLevel(simplified_solved_InputToSAT, false);

            bm->ASTNodeStats(size_inc_message.c_str(), simplified_solved_InputToSAT);
          }

        if (bm->UserFlags.wordlevel_solve_flag && bm->UserFlags.optimize_flag)
          {
            simplified_solved_InputToSAT = bvSolver->TopLevelBVSolve(simplified_solved_InputToSAT);
            bm->ASTNodeStats(bitvec_message.c_str(), simplified_solved_InputToSAT);
          }
      }
    while (inputToSAT != simplified_solved_InputToSAT);

    if (bm->UserFlags.bitConstantProp_flag)
      {
        bm->GetRunTimes()->start(RunTimes::ConstantBitPropagation);
        simplifier::constantBitP::ConstantBitPropagation cb(simp, bm->defaultNodeFactory, simplified_solved_InputToSAT);
        simplified_solved_InputToSAT = cb.topLevelBothWays(simplified_solved_InputToSAT);

        bm->GetRunTimes()->stop(RunTimes::ConstantBitPropagation);

        if (cb.isUnsatisfiable())
          simplified_solved_InputToSAT = bm->ASTFalse;

        bm->ASTNodeStats(cb_message.c_str(), simplified_solved_InputToSAT);
      }

    if (bm->UserFlags.isSet("use-intervals", "1"))
      {
        EstablishIntervals intervals(*bm);
        simplified_solved_InputToSAT = intervals.topLevel_unsignedIntervals(simplified_solved_InputToSAT);
        bm->ASTNodeStats(int_message.c_str(), simplified_solved_InputToSAT);
      }

    // Find pure literals.
    if (bm->UserFlags.isSet("pure-literals", "1"))
      {
        FindPureLiterals fpl;
        bool changed = fpl.topLevel(simplified_solved_InputToSAT, simp, bm);
        if (changed)
          {
            simplified_solved_InputToSAT = simp->applySubstitutionMap(simplified_solved_InputToSAT);
            simp->haveAppliedSubstitutionMap();
            bm->ASTNodeStats(pl_message.c_str(), simplified_solved_InputToSAT);
          }
      }

    if (bm->soft_timeout_expired)
        return SOLVER_TIMEOUT;

    // Simplify using Ite context
    if (bm->UserFlags.optimize_flag && bm->UserFlags.isSet("ite-context", "0"))
      {
        UseITEContext iteC(bm);
        simplified_solved_InputToSAT = iteC.topLevel(simplified_solved_InputToSAT);
        bm->ASTNodeStats("After ITE Context: ", simplified_solved_InputToSAT);
      }

    if (bm->UserFlags.isSet("aig-core-simplify", "0"))
      {
        AIGSimplifyPropositionalCore aigRR(bm);
        simplified_solved_InputToSAT = aigRR.topLevel(simplified_solved_InputToSAT);
        bm->ASTNodeStats("After AIG Core: ", simplified_solved_InputToSAT);
      }

#if 0
    bm->ASTNodeStats("Before SimplifyWrites_Inplace begins: ", simplified_solved_InputToSAT);

    bm->SimplifyWrites_InPlace_Flag = true;
    bm->Begin_RemoveWrites = false;
    bm->start_abstracting = false;
    bm->TermsAlreadySeenMap_Clear();
    do
      {
        inputToSAT = simplified_solved_InputToSAT;

        if (bm->UserFlags.optimize_flag)
          {
            simplified_solved_InputToSAT = pe->topLevel(simplified_solved_InputToSAT, arrayTransformer);

            if (simp->hasUnappliedSubstitutions())
              {
                simplified_solved_InputToSAT = simp->applySubstitutionMap(simplified_solved_InputToSAT);
                simp->haveAppliedSubstitutionMap();
              }

            bm->ASTNodeStats(pe->message.c_str(), simplified_solved_InputToSAT);

            simplified_solved_InputToSAT = simp->SimplifyFormula_TopLevel(simplified_solved_InputToSAT, false);
            bm->ASTNodeStats("after simplification: ", simplified_solved_InputToSAT);


            if (bm->UserFlags.isSet("always-true", "0"))
              {
                SimplifyingNodeFactory nf(*(bm->hashingNodeFactory), *bm);
                AlwaysTrue always (simp,bm,&nf);
                simplified_solved_InputToSAT = always.topLevel(simplified_solved_InputToSAT);
                bm->ASTNodeStats("After removing always true: ", simplified_solved_InputToSAT);
              }
          }

        // The word level solver uses the simplifier to apply the rewrites it makes,
        // without optimisations enabled. It will enter infinite loops on some input.
        // Instead it could use the apply function of the substitution map, but it
        // doesn't yet...
        if (bm->UserFlags.wordlevel_solve_flag && bm->UserFlags.optimize_flag)
          {
            simplified_solved_InputToSAT = bvSolver->TopLevelBVSolve(simplified_solved_InputToSAT);
            bm->ASTNodeStats("after solving: ", simplified_solved_InputToSAT);
          }
      }
    while (inputToSAT != simplified_solved_InputToSAT);

    bm->ASTNodeStats("After SimplifyWrites_Inplace: ", simplified_solved_InputToSAT);
#endif

    if (bm->UserFlags.isSet("enable-unconstrained", "1"))
      {
        // Remove unconstrained.
        RemoveUnconstrained r(*bm);
        simplified_solved_InputToSAT = r.topLevel(simplified_solved_InputToSAT, simp);
        bm->ASTNodeStats(uc_message.c_str(), simplified_solved_InputToSAT);
      }

    bm->TermsAlreadySeenMap_Clear();

    //bm->start_abstracting = false;
    bm->SimplifyWrites_InPlace_Flag = false;
    //bm->Begin_RemoveWrites = false;

    long final_difficulty_score = difficulty.score(simplified_solved_InputToSAT);

    bool worse= false;
    if (final_difficulty_score > 1.1 * initial_difficulty_score)
        worse = true;

    // It's of course very wasteful to do this! Later I'll make it reuse the work..
    // We bit-blast again, in order to throw it away, so that we can measure whether
    // the number of AIG nodes is smaller. The difficulty score is sometimes completely
    // wrong, the sage-app7 are the motivating examples. The other way to improve it would
    // be to fix the difficulty scorer!
    if (!worse && (bitblasted_difficulty != -1))
     {
        BBNodeManagerAIG bbnm;
        BitBlaster<BBNodeAIG, BBNodeManagerAIG> bb(&bbnm, simp, bm->defaultNodeFactory , &(bm->UserFlags));
        bb.BBForm(simplified_solved_InputToSAT);
        int newBB=  bbnm.totalNumberOfNodes();
        if (bm->UserFlags.stats_flag)
          cerr << "Final BB Size:" << newBB << endl;

        if (bitblasted_difficulty < newBB)
          worse = true;
    }


    if (bm->UserFlags.stats_flag)
      {
        cerr << "Initial Difficulty Score:" << initial_difficulty_score << endl;
        cerr << "Final Difficulty Score:" << final_difficulty_score << endl;
      }

    bool optimize_enabled = bm->UserFlags.optimize_flag;
    if (worse &&
       (!arrayops || bm->UserFlags.isSet("array-difficulty-reversion", "1")) &&
       bm->UserFlags.isSet("difficulty-reversion", "1"))
      {
        // If the simplified problem is harder, than the
        // initial problem we revert back to the initial
        // problem.

        if (bm->UserFlags.stats_flag)
          cerr << "simplification made the problem harder, reverting." << endl;
        simplified_solved_InputToSAT = revert->toRevertTo;

        // I do this to clear the substitution/solver map.
        // Not sure what would happen if it contained simplifications
        // that haven't been applied.
        simp->ClearAllTables();

        simp->Return_SolverMap()->insert(revert->initialSolverMap.begin(), revert->initialSolverMap.end());
        revert->initialSolverMap.clear();

        // Copy back what we knew about arrays at the start..
        arrayTransformer->arrayToIndexToRead.clear();
        arrayTransformer->arrayToIndexToRead.insert(revert->backup_arrayToIndexToRead.begin(), revert->backup_arrayToIndexToRead.end());

        // The arrayTransformer calls simplify. We don't want
        // it to put back in all the bad simplifications.
        bm->UserFlags.optimize_flag = false;
      }
    revert.reset(NULL);

    simplified_solved_InputToSAT = arrayTransformer->TransformFormula_TopLevel(simplified_solved_InputToSAT);
    bm->ASTNodeStats("after transformation: ", simplified_solved_InputToSAT);
    bm->TermsAlreadySeenMap_Clear();

    bm->UserFlags.optimize_flag = optimize_enabled;

    SOLVER_RETURN_TYPE res;
    if (!bm->UserFlags.ackermannisation)
      {
        bm->counterexample_checking_during_refinement = true;
      }

    // We are about to solve. Clear out all the memory associated with caches
    // that we won't need again.
    simp->ClearCaches();
    simp->haveAppliedSubstitutionMap();
    bm->ClearAllTables();

    // Deleting it clears out all the buckets associated with hashmaps etc. too.
    bvSolver.reset(NULL);
    pe.reset(NULL);


    if (bm->UserFlags.stats_flag)
      simp->printCacheStatus();

    const bool maybeRefinement = arrayops && !bm->UserFlags.ackermannisation;

    simplifier::constantBitP::ConstantBitPropagation* cb = NULL;
    std::auto_ptr<simplifier::constantBitP::ConstantBitPropagation> cleaner;

    if (bm->UserFlags.bitConstantProp_flag)
      {
        bm->GetRunTimes()->start(RunTimes::ConstantBitPropagation);
        cb = new simplifier::constantBitP::ConstantBitPropagation(simp, bm->defaultNodeFactory,
            simplified_solved_InputToSAT);
        cleaner.reset(cb);
        bm->GetRunTimes()->stop(RunTimes::ConstantBitPropagation);

        bm->ASTNodeStats(cb_message.c_str(), simplified_solved_InputToSAT);

        if (cb->isUnsatisfiable())
          simplified_solved_InputToSAT = bm->ASTFalse;
      }

    ToSATAIG toSATAIG(bm, cb, arrayTransformer);

    ToSATBase* satBase = bm->UserFlags.isSet("traditional-cnf", "0") ? tosat : &toSATAIG;

    if (bm->soft_timeout_expired)
        return SOLVER_TIMEOUT;

    // If it doesn't contain array operations, use ABC's CNF generation.
    res = Ctr_Example->CallSAT_ResultCheck(NewSolver, simplified_solved_InputToSAT, original_input, satBase,
        maybeRefinement);

    if (bm->soft_timeout_expired)
      {
        if (toSATAIG.cbIsDestructed())
          cleaner.release();

        return SOLVER_TIMEOUT;
      }
    if (SOLVER_UNDECIDED != res)
      {
        // If the aig converter knows that it is never going to be called again,
        // it deletes the constant bit stuff before calling the SAT solver.
        if (toSATAIG.cbIsDestructed())
          cleaner.release();

        CountersAndStats("print_func_stats", bm);
        return res;
      }

    assert(arrayops); // should only go to abstraction refinement if there are array ops.
    assert(!bm->UserFlags.ackermannisation); // Refinement must be enabled too.
    assert (bm->UserFlags.solver_to_use != UserDefinedFlags::MINISAT_PROPAGATORS); // The array solver shouldn't have returned undecided..

    res = Ctr_Example->SATBased_ArrayReadRefinement(NewSolver, simplified_solved_InputToSAT, original_input, satBase);
    if (SOLVER_UNDECIDED != res)
      {
        if (toSATAIG.cbIsDestructed())
          cleaner.release();

        CountersAndStats("print_func_stats", bm);
        return res;
      }

    #if 0
    res = Ctr_Example->SATBased_ArrayWriteRefinement(NewSolver, original_input, satBase);
    if (SOLVER_UNDECIDED != res)
      {
        if (toSATAIG.cbIsDestructed())
          cleaner.release();

        CountersAndStats("print_func_stats", bm);
        return res;
      }

    res = Ctr_Example->SATBased_ArrayReadRefinement(NewSolver, simplified_solved_InputToSAT, original_input, satBase);
    if (SOLVER_UNDECIDED != res)
      {
        if (toSATAIG.cbIsDestructed())
          cleaner.release();

        CountersAndStats("print_func_stats", bm);
        return res;
      }
    #endif

    FatalError("TopLevelSTPAux: reached the end without proper conclusion:"
      "either a divide by zero in the input or a bug in STP");
    //bogus return to make the compiler shut up
    return SOLVER_ERROR;


  } //End of TopLevelSTPAux
Example 18
  // These transformations should never increase the size of the DAG.
   ASTNode
  STP::sizeReducing(ASTNode simplified_solved_InputToSAT, BVSolver* bvSolver, PropagateEqualities *pe)
  {

    simplified_solved_InputToSAT = pe->topLevel(simplified_solved_InputToSAT, arrayTransformer);
    if (simp->hasUnappliedSubstitutions())
      {
        simplified_solved_InputToSAT = simp->applySubstitutionMap(simplified_solved_InputToSAT);
        simp->haveAppliedSubstitutionMap();
        bm->ASTNodeStats(pe_message.c_str(), simplified_solved_InputToSAT);
      }

    if (bm->UserFlags.isSet("enable-unconstrained", "1"))
      {
        // Remove unconstrained.
        RemoveUnconstrained r1(*bm);
        simplified_solved_InputToSAT = r1.topLevel(simplified_solved_InputToSAT, simp);
        bm->ASTNodeStats(uc_message.c_str(), simplified_solved_InputToSAT);
      }

    if (bm->UserFlags.isSet("use-intervals", "1"))
      {
        EstablishIntervals intervals(*bm);
        simplified_solved_InputToSAT = intervals.topLevel_unsignedIntervals(simplified_solved_InputToSAT);
        bm->ASTNodeStats(int_message.c_str(), simplified_solved_InputToSAT);
      }

    if (bm->UserFlags.bitConstantProp_flag)
      {
        bm->GetRunTimes()->start(RunTimes::ConstantBitPropagation);
        simplifier::constantBitP::ConstantBitPropagation cb(simp, bm->defaultNodeFactory, simplified_solved_InputToSAT);
        simplified_solved_InputToSAT = cb.topLevelBothWays(simplified_solved_InputToSAT, true,false);

        bm->GetRunTimes()->stop(RunTimes::ConstantBitPropagation);

        if (cb.isUnsatisfiable())
          simplified_solved_InputToSAT = bm->ASTFalse;

        if (simp->hasUnappliedSubstitutions())
          {
          simplified_solved_InputToSAT = simp->applySubstitutionMap(simplified_solved_InputToSAT);
          simp->haveAppliedSubstitutionMap();
          }

        bm->ASTNodeStats(cb_message.c_str(), simplified_solved_InputToSAT);
      }

    // Find pure literals.
    if (bm->UserFlags.isSet("pure-literals", "1"))
      {
        FindPureLiterals fpl;
        bool changed = fpl.topLevel(simplified_solved_InputToSAT, simp, bm);
        if (changed)
          {
            simplified_solved_InputToSAT = simp->applySubstitutionMap(simplified_solved_InputToSAT);
            simp->haveAppliedSubstitutionMap();
            bm->ASTNodeStats(pl_message.c_str() , simplified_solved_InputToSAT);
          }
      }

    if (bm->UserFlags.isSet("always-true", "0"))
      {
        AlwaysTrue always (simp,bm,bm->defaultNodeFactory);
        simplified_solved_InputToSAT = always.topLevel(simplified_solved_InputToSAT);
        bm->ASTNodeStats("After removing always true: ", simplified_solved_InputToSAT);
      }

    if (bm->UserFlags.wordlevel_solve_flag && bm->UserFlags.optimize_flag)
      {
        simplified_solved_InputToSAT = bvSolver->TopLevelBVSolve(simplified_solved_InputToSAT, false);
        bm->ASTNodeStats(bitvec_message.c_str(), simplified_solved_InputToSAT);
      }

    return simplified_solved_InputToSAT;
  }
Example 19
  //Accepts a query, calls the SAT solver and generates Valid/Invalid.
  //If it returns 0 the input is INVALID, if it returns 1 the input is
  //VALID, and if it returns 2 the result is UNDECIDED.
  SOLVER_RETURN_TYPE
  STP::TopLevelSTPAux(SATSolver& NewSolver, const ASTNode& modified_input, const ASTNode& original_input)
  {

    ASTNode inputToSAT = modified_input;
    ASTNode orig_input = original_input;
    bm->ASTNodeStats("input asserts and query: ", inputToSAT);

    ASTNode simplified_solved_InputToSAT = inputToSAT;
    const bool arrayops = containsArrayOps(original_input);

    DifficultyScore difficulty;
    if (bm->UserFlags.stats_flag)
      cerr << "Difficulty Initially:" << difficulty.score(original_input) << endl;

    // A heap object so I can easily control its lifetime.
    BVSolver* bvSolver = new BVSolver(bm, simp);

    simplified_solved_InputToSAT = sizeReducing(inputToSAT, bvSolver);

    unsigned initial_difficulty_score = difficulty.score(simplified_solved_InputToSAT);

    // Run it to a fixed point if it's not too difficult.
    // Currently we discard all the state each time sizeReducing is called,
    // so it's expensive to call.
    if (!arrayops && initial_difficulty_score < 1000000)
      {
        simplified_solved_InputToSAT = callSizeReducing(simplified_solved_InputToSAT, bvSolver, initial_difficulty_score);
        initial_difficulty_score = difficulty.score(simplified_solved_InputToSAT);
      }

    if (bm->UserFlags.stats_flag)
      cout << "Difficulty After Size reducing:" << initial_difficulty_score << endl;

    // Copy the solver map in case we need to revert.
    ASTNodeMap initialSolverMap;
    ASTNode toRevertTo;
    if (!arrayops) // we don't revert for Array problems yet, so don't copy it.
      {
        initialSolverMap.insert(simp->Return_SolverMap()->begin(), simp->Return_SolverMap()->end());
        toRevertTo = simplified_solved_InputToSAT;
      }

    //Round of substitution, solving, and simplification. Ensures that the
    //DAG is minimized as much as possible, and ideally should
    //guarantee that all like terms in BVPLUSes have been combined.
    bm->SimplifyWrites_InPlace_Flag = false;
    bm->Begin_RemoveWrites = false;
    bm->start_abstracting = false;
    bm->TermsAlreadySeenMap_Clear();
    do
      {
        inputToSAT = simplified_solved_InputToSAT;

        if (bm->UserFlags.optimize_flag)
          {
            simplified_solved_InputToSAT = simp->CreateSubstitutionMap(simplified_solved_InputToSAT, arrayTransformer);

            // Imagine:
            // The simplifier simplifies (0 + T) to T
            // Then bvsolve introduces (0 + T)
            // Then CreateSubstitutionMap decides T maps to a constant, but leaving another (0+T).
            // When we go to simplify (0 + T) will still be in the simplify cache, so will be mapped to T.
            // But it shouldn't be T, it should be a constant.
            // Applying the substitution map fixes this case.
            //
            if (simp->hasUnappliedSubstitutions())
              {
                simplified_solved_InputToSAT = simp->applySubstitutionMap(simplified_solved_InputToSAT);
                simp->haveAppliedSubstitutionMap();
              }

            bm->ASTNodeStats("after pure substitution: ", simplified_solved_InputToSAT);

            simplified_solved_InputToSAT = simp->SimplifyFormula_TopLevel(simplified_solved_InputToSAT, false);

            bm->ASTNodeStats("after simplification: ", simplified_solved_InputToSAT);
          }

        if (bm->UserFlags.wordlevel_solve_flag && bm->UserFlags.optimize_flag)
          {
            simplified_solved_InputToSAT = bvSolver->TopLevelBVSolve(simplified_solved_InputToSAT);
            bm->ASTNodeStats("after solving: ", simplified_solved_InputToSAT);
          }
      }
    while (inputToSAT != simplified_solved_InputToSAT);

    if (bm->UserFlags.bitConstantProp_flag)
      {
        bm->GetRunTimes()->start(RunTimes::ConstantBitPropagation);
        SimplifyingNodeFactory nf(*(bm->hashingNodeFactory), *bm);
        simplifier::constantBitP::ConstantBitPropagation cb(simp, &nf, simplified_solved_InputToSAT);
        simplified_solved_InputToSAT = cb.topLevelBothWays(simplified_solved_InputToSAT);

        bm->GetRunTimes()->stop(RunTimes::ConstantBitPropagation);

        if (cb.isUnsatisfiable())
          simplified_solved_InputToSAT = bm->ASTFalse;

        bm->ASTNodeStats("After Constant Bit Propagation begins: ", simplified_solved_InputToSAT);
      }

    if (bm->UserFlags.isSet("use-intervals", "1"))
      {
        EstablishIntervals intervals(*bm);
        simplified_solved_InputToSAT = intervals.topLevel_unsignedIntervals(simplified_solved_InputToSAT);
        bm->ASTNodeStats("After Establishing Intervals: ", simplified_solved_InputToSAT);
      }

    // Find pure literals.
    if (bm->UserFlags.isSet("pure-literals", "1"))
      {
        FindPureLiterals fpl;
        bool changed = fpl.topLevel(simplified_solved_InputToSAT, simp, bm);
        if (changed)
          {
            simplified_solved_InputToSAT = simp->applySubstitutionMap(simplified_solved_InputToSAT);
            simp->haveAppliedSubstitutionMap();
            bm->ASTNodeStats("After Pure Literals: ", simplified_solved_InputToSAT);
          }
      }

    // Simplify using Ite context
    if (bm->UserFlags.optimize_flag && bm->UserFlags.isSet("ite-context", "0"))
      {
        UseITEContext iteC(bm);
        simplified_solved_InputToSAT = iteC.topLevel(simplified_solved_InputToSAT);
        bm->ASTNodeStats("After ITE Context: ", simplified_solved_InputToSAT);
      }

    if (bm->UserFlags.isSet("aig-core-simplify", "0"))
      {
        AIGSimplifyPropositionalCore aigRR(bm);
        simplified_solved_InputToSAT = aigRR.topLevel(simplified_solved_InputToSAT);
        bm->ASTNodeStats("After AIG Core: ", simplified_solved_InputToSAT);
      }

    bm->ASTNodeStats("Before SimplifyWrites_Inplace begins: ", simplified_solved_InputToSAT);

    bm->SimplifyWrites_InPlace_Flag = true;
    bm->Begin_RemoveWrites = false;
    bm->start_abstracting = false;
    bm->TermsAlreadySeenMap_Clear();
    do
      {
        inputToSAT = simplified_solved_InputToSAT;

        if (bm->UserFlags.optimize_flag)
          {
            simplified_solved_InputToSAT = simp->CreateSubstitutionMap(simplified_solved_InputToSAT, arrayTransformer);

            if (simp->hasUnappliedSubstitutions())
              {
                simplified_solved_InputToSAT = simp->applySubstitutionMap(simplified_solved_InputToSAT);
                simp->haveAppliedSubstitutionMap();
              }

            bm->ASTNodeStats("after pure substitution: ", simplified_solved_InputToSAT);

            simplified_solved_InputToSAT = simp->SimplifyFormula_TopLevel(simplified_solved_InputToSAT, false);
            bm->ASTNodeStats("after simplification: ", simplified_solved_InputToSAT);


            if (bm->UserFlags.isSet("always-true", "0"))
              {
                SimplifyingNodeFactory nf(*(bm->hashingNodeFactory), *bm);
                AlwaysTrue always (simp,bm,&nf);
                simplified_solved_InputToSAT = always.topLevel(simplified_solved_InputToSAT);
                bm->ASTNodeStats("After removing always true: ", simplified_solved_InputToSAT);
              }
          }

        // The word level solver uses the simplifier to apply the rewrites it makes,
        // without optimisations enabled. It will enter infinite loops on some input.
        // Instead it could use the apply function of the substitution map, but it
        // doesn't yet...
        if (bm->UserFlags.wordlevel_solve_flag && bm->UserFlags.optimize_flag)
          {
            simplified_solved_InputToSAT = bvSolver->TopLevelBVSolve(simplified_solved_InputToSAT);
            bm->ASTNodeStats("after solving: ", simplified_solved_InputToSAT);
          }
      }
    while (inputToSAT != simplified_solved_InputToSAT);

    bm->ASTNodeStats("After SimplifyWrites_Inplace: ", simplified_solved_InputToSAT);

    if (bm->UserFlags.isSet("enable-unconstrained", "1"))
      {
        // Remove unconstrained.
        RemoveUnconstrained r(*bm);
        simplified_solved_InputToSAT = r.topLevel(simplified_solved_InputToSAT, simp);
        bm->ASTNodeStats("After Unconstrained Remove begins: ", simplified_solved_InputToSAT);
      }

    bm->TermsAlreadySeenMap_Clear();

    bm->start_abstracting = false;
    bm->SimplifyWrites_InPlace_Flag = false;
    bm->Begin_RemoveWrites = false;

    long final_difficulty_score = difficulty.score(simplified_solved_InputToSAT);
    if (bm->UserFlags.stats_flag)
      {
        cerr << "Initial Difficulty Score:" << initial_difficulty_score << endl;
        cerr << "Final Difficulty Score:" << final_difficulty_score << endl;
      }

    bool optimize_enabled = bm->UserFlags.optimize_flag;
    if (final_difficulty_score > 1.1 * initial_difficulty_score && !arrayops && bm->UserFlags.isSet(
        "difficulty-reversion", "1"))
      {
        // If the simplified problem is harder, than the
        // initial problem we revert back to the initial
        // problem.

        if (bm->UserFlags.stats_flag)
          cerr << "simplification made the problem harder, reverting." << endl;
        simplified_solved_InputToSAT = toRevertTo;

        // I do this to clear the substitution/solver map.
        // Not sure what would happen if it contained simplifications
        // that haven't been applied.
        simp->ClearAllTables();

        simp->Return_SolverMap()->insert(initialSolverMap.begin(), initialSolverMap.end());
        initialSolverMap.clear();

        // The arrayTransformer calls simplify. We don't want
        // it to put back in all the bad simplifications.
        bm->UserFlags.optimize_flag = false;
      }

    simplified_solved_InputToSAT = arrayTransformer->TransformFormula_TopLevel(simplified_solved_InputToSAT);
    bm->ASTNodeStats("after transformation: ", simplified_solved_InputToSAT);
    bm->TermsAlreadySeenMap_Clear();

    bm->UserFlags.optimize_flag = optimize_enabled;

    SOLVER_RETURN_TYPE res;
    if (bm->UserFlags.arrayread_refinement_flag)
      {
        bm->counterexample_checking_during_refinement = true;
      }

    // We are about to solve. Clear out all the memory associated with caches
    // that we won't need again.
    simp->ClearCaches();
    simp->haveAppliedSubstitutionMap();
    bm->ClearAllTables();

    // Deleting it clears out all the buckets associated with hashmaps etc. too.
    delete bvSolver;
    bvSolver = NULL;

    if (bm->UserFlags.stats_flag)
      simp->printCacheStatus();

    const bool maybeRefinement = arrayops && bm->UserFlags.arrayread_refinement_flag;

    simplifier::constantBitP::ConstantBitPropagation* cb = NULL;
    std::auto_ptr<simplifier::constantBitP::ConstantBitPropagation> cleaner;

    if (bm->UserFlags.bitConstantProp_flag)
      {
        bm->ASTNodeStats("Before Constant Bit Propagation begins: ", simplified_solved_InputToSAT);

        bm->GetRunTimes()->start(RunTimes::ConstantBitPropagation);
        cb = new simplifier::constantBitP::ConstantBitPropagation(simp, bm->defaultNodeFactory,
            simplified_solved_InputToSAT);
        cleaner.reset(cb);
        bm->GetRunTimes()->stop(RunTimes::ConstantBitPropagation);

        if (cb->isUnsatisfiable())
          simplified_solved_InputToSAT = bm->ASTFalse;
      }

    ToSATAIG toSATAIG(bm, cb);
    toSATAIG.setArrayTransformer(arrayTransformer);

    ToSATBase* satBase = bm->UserFlags.isSet("traditional-cnf", "0") ? tosat : ((ToSAT*) &toSATAIG) ;

    // If it doesn't contain array operations, use ABC's CNF generation.
    res = Ctr_Example->CallSAT_ResultCheck(NewSolver, simplified_solved_InputToSAT, orig_input, satBase,
        maybeRefinement);

    if (SOLVER_UNDECIDED != res)
      {
        // If the aig converter knows that it is never going to be called again,
        // it deletes the constant bit stuff before calling the SAT solver.
        if (toSATAIG.cbIsDestructed())
          cleaner.release();

        CountersAndStats("print_func_stats", bm);
        return res;
      }

    assert(arrayops); // should only go to abstraction refinement if there are array ops.
    assert(bm->UserFlags.arrayread_refinement_flag); // Refinement must be enabled too.

    // Unfortunately how I implemented the incremental CNF generator in ABC means that
    // cryptominisat and simplifying minisat may simplify away variables that we later need.

    res = Ctr_Example->SATBased_ArrayReadRefinement(NewSolver, simplified_solved_InputToSAT, orig_input, satBase);
    if (SOLVER_UNDECIDED != res)
      {
        if (toSATAIG.cbIsDestructed())
          cleaner.release();

        CountersAndStats("print_func_stats", bm);
        return res;
      }

    res = Ctr_Example->SATBased_ArrayWriteRefinement(NewSolver, orig_input, satBase);
    if (SOLVER_UNDECIDED != res)
      {
        if (toSATAIG.cbIsDestructed())
          cleaner.release();

        CountersAndStats("print_func_stats", bm);
        return res;
      }

    res = Ctr_Example->SATBased_ArrayReadRefinement(NewSolver, simplified_solved_InputToSAT, orig_input, satBase);
    if (SOLVER_UNDECIDED != res)
      {
        if (toSATAIG.cbIsDestructed())
          cleaner.release();

        CountersAndStats("print_func_stats", bm);
        return res;
      }

    FatalError("TopLevelSTPAux: reached the end without proper conclusion:"
      "either a divide by zero in the input or a bug in STP");
    //bogus return to make the compiler shut up
    return SOLVER_ERROR;
  } //End of TopLevelSTPAux
Example 20
 void calc_denom_exact(int *CODON_INDEX,                  int *AA_COUNT,                    double *PHI,
                 double *ELONG_PR,          double *MUTATION_RATES,           double *POP,                      
                 double *A_1,                       double *A_2,                      double *AT_BIAS,
                 double *BEE,                       int *IGNORE,                      double *GAMMA,                    
                 double *SCALE_FACTOR,              double *LIK,                      double *ETA_MEAN,
                 double *ETA_VAR,                   double *ETA_OBS,                  double *ETA_MIN,
                 double *ETA_MAX,                   double *BINS,                     int *NUM_BINS,
                 char **AA_VEC,                      char **CODON_VEC,                 int *N_CODON){

    
    int i;
    int increment_next=0, counter=0;
    int aa_index,aa_position,aa_count;
    int num_codons;
    int size_codon_space = 1;
    int t_codon_cts[61] = {0} ,t_codon_cts_rel[61] = {0};
    int t_codon_index[MAX_AA]; //holds temporary codon_index
    
    double t_eta,t_mu,t_fitness,t_mu_fitness; 
    double fitness_total = 0, mu_fitness_total=0, eta_total=0, eta_sq_total=0;

    
    double pcnt=0;
    
    double answer[2];
    
    double * bin_lims;
    bin_lims = malloc(sizeof(double)*(*(NUM_BINS)+1));

    //*=================================*
    //* 1) Process input passed from R  *
    //*=================================*
    
    Process_R_input(CODON_INDEX,ELONG_PR,MUTATION_RATES,AA_COUNT,
                        PHI, POP, A_1, A_2, AT_BIAS,
                        BEE, IGNORE, GAMMA, SCALE_FACTOR, AA_VEC,
                        CODON_VEC, N_CODON);

    Process_AA_Information();

    //* initialize codon structures
    Generate_Codon_Structures();
    
    aa_count=Sequence.aa_count;
    
    //*=================================*
    //* 2) Find size of codon space     *
    //*=================================*

    Convert_Codon_Index_to_AA_Index(Sequence.codon_index,
                            Sequence.aa_index, aa_count);

    for(i=0;i<aa_count;i++){
        aa_index = Sequence.aa_index[i];
        num_codons = AA[aa_index].num_codons;
        size_codon_space *= num_codons;
    }
    
    //*=====================================*
    //* 3) Find eta min/max and define bin  *
    //*    limits for eta hist              *
    //*=====================================*
        
    eta_min_max(Sequence.aa_index,Sequence.aa_count,answer);
    *ETA_MIN = *(answer);
    *ETA_MAX = *(answer + 1);

    intervals(*ETA_MIN,*ETA_MAX,*(NUM_BINS)+1,bin_lims);
    
    //*=====================================*
    //* 4) Initialize observed sequence and *
    //*    create temporary codon index     *
    //*=====================================*
        
        //* 4.1) Calculate eta_obs
    Sequence.eta_obs = Calc_Eta_NSE(&Sequence);
    
    //Rprintf("Eta_obs: %f\tSigma_obs: %f\txi_obs: %f\taa_count: %d\n",Sequence.eta_obs,Sequence.sigma_obs,Sequence.xi_obs,aa_count);
        
        //* 4.2) Find initial codon counts
    Codon_Counts(Sequence.codon_index,Sequence.codon_cts,aa_count);
    
    //Rprintf("Sequence.sigma_obs: %f\nSequence.xi_obs %f\nSequence.eta_obs %f\n",Sequence.sigma_obs,Sequence.xi_obs,Sequence.eta_obs);
    
        //* 4.3) Initialize temporary codon index (this will be incremented in the for loop to 
        //*      represent each permutation of the codon sequence). 
    for(i=0;i<aa_count;i++){
        aa_index = Sequence.aa_index[i];
        t_codon_index[i] = AA[aa_index].codon_index[0];
    }
    
        //* 4.4) Find codon counts of t_codon_index
    Codon_Counts(t_codon_index,t_codon_cts,aa_count);
    
        //* 4.5) Find (t_codon_cts - Sequence.codon_cts): This will give the codon counts relative
        //*      to the original sequence. When we calculate Mu from these counts, it will give us
        //*      mu_sim/mu_obs. 
    for(i=0;i<61;i++){ //assume no stop codons
        t_codon_cts_rel[i] = t_codon_cts[i] - Sequence.codon_cts[i];
    }
    
    //*=====================================*
    //* 5) Big ass for loop to calculate    *
    //*    fitness of every codon sequence  *
    //*    and keep running total           *
    //*=====================================*
    
    //Rprintf("Calculating denominator with %d synonymous sequences...\n",size_codon_space);
    

    for(i=0;i<size_codon_space;i++) {
        
        //*5.1) Calc Eta

          t_eta = Calc_Eta_NSE (&Sequence);

          
        //* 5.3) Calculate Mu*Fitness
        //*      Fitness = exp(-Q*Ne*phi*eta) or exp(-y*eta)
        //*      Mu=\prod_{i=1}^n(\mu_i)
        
          t_mu = Calc_Seq_Mu(t_codon_cts_rel,aa_count); //This gives mu_sim/mu_obs
        
          t_fitness = exp(-G.Q*G.Ne*Sequence.phi_obs*(t_eta-Sequence.eta_obs)); //calculate relative to eta_obs
                                                                            //to avoid very small number                                                                 
          t_mu_fitness = t_mu*t_fitness;
          
          //* Bin eta
          bin_eta(t_eta,BINS,bin_lims,t_mu_fitness,*NUM_BINS);
          
        //* 5.4) Add fitness to total fitness
        //*      Denominator of likelihood function is the sum of all 
        //*      fitnesses in codon space
        //*      Also add eta to total eta to find avg eta in codon space
          
          fitness_total += t_fitness;
          mu_fitness_total += t_mu_fitness;
          eta_total += t_eta;
          eta_sq_total += t_eta*t_eta;
        
        
        //* 5.5) Increment codon sequence
        //*      This algorithm increments the codon sequence by incrementing the codon index
        //*      of the first position. If the first position is at the highest codon index
        //*      for that amino acid, it resets the first position to the lowest codon index
        //*      for that amino acid and increments the second position. If the second position 
        //*      is at the highest codon index for that amino acid... and so on. This works 
        //*      similar to an old-fashioned rolling wheel counter.
        //*      We also need to update codon counts here...
        
          aa_position=0;
          
          do {
              increment_next=0; //assume that we do not increment the next position
              aa_index = Sequence.aa_index[aa_position];
              num_codons = AA[aa_index].num_codons;
              if(t_codon_index[aa_position] < AA[aa_index].codon_index[num_codons-1]){
                    t_codon_cts_rel[t_codon_index[aa_position]++]--; //Subtract one from the codon count of the current codon
                                                                     //and advance this position to its next codon index
                    t_codon_cts_rel[t_codon_index[aa_position]]++;   //Add one to the codon count of the new codon
              }else{
                    t_codon_cts_rel[t_codon_index[aa_position]]--; //Subtract 1 from codon count of current codon
                    t_codon_index[aa_position] = AA[aa_index].codon_index[0]; //Reset to lowest codon index for this amino acid
                    t_codon_cts_rel[t_codon_index[aa_position]]++; //Add 1 to codon count of new codon
                    increment_next = 1; //turn on increment flag
              }
              aa_position++; //move on to the next amino acid position
          }while(increment_next && aa_position < aa_count);

          counter++;
          
          
          if(counter > pcnt*(double)size_codon_space/100){ //report progress once another percent of codon space is done
              //Rprintf("%d Percent Complete.\n",pcnt);
              pcnt++;
          }
    }
    
    
    //*==========================*
    //* 6) Calculate likelihood  *
    //*==========================*
         
    double likelihood,eta_mean,eta_var;
     
    likelihood = 1/(mu_fitness_total/size_codon_space);
    eta_mean = eta_total/size_codon_space;
    eta_var =  eta_sq_total/size_codon_space - pow(eta_mean,2); //E[x^2] - E[x]^2
          
    *LIK = likelihood;
    *ETA_MEAN = eta_mean;
    *ETA_VAR = eta_var;
    *ETA_OBS = Sequence.eta_obs;
    
    free(bin_lims);
}
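
The step 5.5 comment above describes an odometer-style ("rolling wheel") increment over synonymous codon sequences. As a sanity check, here is a minimal self-contained sketch of the same enumeration idea, using an invented table of codon counts per position rather than the real AA[] data; it is an illustration of the technique, not the project's code.

#include <cstdio>
#include <vector>

int main() {
    // Hypothetical example: 3 amino-acid positions with 2, 3 and 2 synonymous codons each.
    std::vector<int> num_codons = { 2, 3, 2 };
    std::vector<int> codon(num_codons.size(), 0); // start every position at its lowest codon

    long visited = 0;
    bool done = false;
    while (!done) {
        ++visited; // in the real loop this is where eta, mu and fitness are accumulated

        // Odometer-style increment: bump position 0; on overflow reset it and carry onward.
        std::size_t pos = 0;
        while (true) {
            if (codon[pos] + 1 < num_codons[pos]) { ++codon[pos]; break; }
            codon[pos] = 0;                                    // roll this wheel back to its first codon
            if (++pos == codon.size()) { done = true; break; } // carried past the last position: finished
        }
    }
    std::printf("visited %ld sequences (expected 2*3*2 = 12)\n", visited);
    return 0;
}
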
std::vector< CHashMatch > CDataBase::searchByHash_offs( CHash hash ) const {
    std::vector< CHashMatch > result;
    std::ifstream hash_file;
    std::vector< std::pair< uint64_t, int64_t > > matches; // vector of pairs ( mel_id, offset )

    // hash offset cycle
    for( int64_t fixed_hash_offset = 0; 
            fixed_hash_offset < static_cast< int64_t >( hash.getLength() ) - static_cast< int64_t >( CFixedHash::length ); 
            ++fixed_hash_offset ) {

        CFixedHash fixed_hash( hash, fixed_hash_offset );
        std::string hash_filename = makeFilenameOfHash( fixed_hash );
        hash_file.open( hash_filename,
                        std::fstream::in | std::fstream::binary );
        if( ! hash_file.is_open() ) {
            std::cout << "ERROR: Couldn't open hash file for reading: " << hash_filename << '\n';
            return result;
        }

        // fixed hash file read cycle
        while( hash_file.is_open() && hash_file.good() && hash_file.peek() != EOF ) {
            // read melody id
            uint64_t mel_id = 0;
            Raspoznavayka::mel_size_t mel_chm_offs = 0;
            if( ! ( read_number_from_file( mel_id, mel_number_size_koeff, hash_file ) // read melody id
                 && read_number_from_file( mel_chm_offs, mel_max_size_koeff, hash_file ) // read fixed hash match offset in melody
                 ) ) {
                return result;
            }
            
            int64_t total_offset = static_cast<int64_t>( mel_chm_offs ) - static_cast<int64_t>( fixed_hash_offset );
            // check distinct
            bool found = false;
            for( auto i = matches.begin(); i != matches.end(); ++i ) {
                if( i->first == mel_id && i->second == total_offset ) {
                    found = true;
                    break;
                }
            }
            if( !found ) {
                // push to matches
                matches.push_back( std::pair< uint64_t, int64_t >( mel_id, total_offset ) );
            }
        } // end of fixed hash file read cycle

        hash_file.close();
    } // end of hash offset cycle

    // read found melodies' data
    std::ifstream index_file;
    std::ifstream id3_file;
    std::ifstream mel_file;
    index_file.open( index_filename, std::fstream::in | std::fstream::binary );
    id3_file.open( id3_filename, std::fstream::in );
    mel_file.open( mel_filename, std::fstream::in | std::fstream::binary );
    if( ! ( index_file.is_open() && id3_file.is_open() && mel_file.is_open() ) ) {
        std::cout << "ERROR: Couldn't open some DB file for writing in " << directory << '\n';
        return result;
    }

    for( auto match = matches.begin(); match != matches.end(); ++match ) {
        int64_t mel_id = match->first, total_offset = match->second;
        // get index entry on this song
        index_file.seekg( mel_id * ( mel_number_size_koeff + mel_file_max_size_koeff ) );
        // get id3 and melody addresses
        uint64_t id3_start = 0, id3_end = 0, mel_start = 0, mel_end = 0;
        if( ! ( read_number_from_file( id3_start, id3_file_max_size_koeff, index_file )
             && read_number_from_file( mel_start, mel_file_max_size_koeff, index_file )
             && read_number_from_file( id3_end, id3_file_max_size_koeff, index_file )
             && read_number_from_file( mel_end, mel_file_max_size_koeff, index_file )
             ) ) {
            return result;
        }
        // read id3 tags
        assert( id3_start < id3_end );
        assert( mel_start < mel_end );
        std::string artist, album, name, year;
        uint64_t record_size = id3_end - id3_start;
        if( ! id3_file.seekg( id3_start ).good() ) {
            std::cout << "ERROR: in id3 file\n";
            break;
        }
        std::getline( id3_file, artist );
        record_size -= id3_file.gcount();
        std::getline( id3_file, album );
        record_size -= id3_file.gcount();
        std::getline( id3_file, name );
        record_size -= id3_file.gcount();
        std::getline( id3_file, year );
        if( ! mel_file.seekg( mel_start ).good() ) {
            std::cout << "ERROR: in mel file\n";
            break;
        }
        std::vector< Raspoznavayka::interval_t > intervals( mel_end - mel_start );
        for( uint64_t i = 0; i < mel_end - mel_start; ++i ) {
            char interval;
            if( mel_file.get( interval ).fail() ) {
                std::cout << "ERROR: in mel file\n";
                break;
            }
            intervals[i] = static_cast< Raspoznavayka::interval_t >( interval );
        }
        CIDTag idtag( artist, album, name, std::atoi( year.c_str() ) );
        CInDBMelody new_melody( intervals, idtag );
        CHashMatch new_chm( &new_melody, total_offset );
        result.push_back( new_chm );
    } // end of found melodies' data read cycle

    id3_file.close();
    mel_file.close();
    index_file.close();
    return result;
}
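
Since total_offset is computed as mel_chm_offs - fixed_hash_offset, two records describe the same alignment exactly when their (mel_id, total_offset) pairs are equal, which is what the distinct check above scans the matches vector for. That scan is linear per record; a possible alternative, sketched below with invented values rather than data read via read_number_from_file(), is to let a std::set do the deduplication.

#include <cstdint>
#include <set>
#include <utility>

int main() {
    // Candidate matches as (mel_id, total_offset); duplicates collapse automatically.
    std::set< std::pair< uint64_t, int64_t > > matches;

    // Hypothetical records; in the real code each pair comes from a hash file.
    matches.insert( { 42, -3 } );
    matches.insert( { 42, -3 } ); // same melody, same alignment: ignored by the set
    matches.insert( { 7, 0 } );

    return matches.size() == 2 ? 0 : 1; // two distinct matches remain
}
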
std::vector< CInDBMelody > CDataBase::getEverything() const {
    std::vector< CInDBMelody > result;
    std::ifstream index_file;
    std::ifstream id3_file;
    std::ifstream mel_file;
    index_file.open( index_filename, std::fstream::in | std::fstream::binary );
    id3_file.open( id3_filename, std::fstream::in );
    mel_file.open( mel_filename, std::fstream::in | std::fstream::binary );
    if( ! ( index_file.is_open() && id3_file.is_open() && mel_file.is_open() ) ) {
        std::cout << "ERROR: Couldn't open some DB file for writing in " << directory << '\n';
        return result;
    }

    uint64_t id3_start = 0, id3_end = 0, mel_start = 0, mel_end = 0;
    // get id3 and melody addresses
    if( ! ( read_number_from_file( id3_start, id3_file_max_size_koeff, index_file )
         && read_number_from_file( mel_start, mel_file_max_size_koeff, index_file )
         ) ) {
        return result;
    }
    while( index_file.good() && index_file.peek() != EOF ) {
        // get id3 and melody addresses
        if( ! ( read_number_from_file( id3_end, id3_file_max_size_koeff, index_file )
             && read_number_from_file( mel_end, mel_file_max_size_koeff, index_file )
             ) ) {
            return result;
        }
        // read id3 tags
        assert( id3_start < id3_end );
        assert( mel_start < mel_end );
        std::string artist, album, name, year;
        uint64_t record_size = id3_end - id3_start;
        if( ! id3_file.seekg( id3_start ).good() ) {
            std::cout << "ERROR: in id3 file\n";
            break;
        }
        std::getline( id3_file, artist );
        record_size -= id3_file.gcount();
        std::getline( id3_file, album );
        record_size -= id3_file.gcount();
        std::getline( id3_file, name );
        record_size -= id3_file.gcount();
        std::getline( id3_file, year );
        if( ! mel_file.seekg( mel_start ).good() ) {
            std::cout << "ERROR: in mel file\n";
            break;
        }
        std::vector< Raspoznavayka::interval_t > intervals( mel_end - mel_start );
        for( uint64_t i = 0; i < mel_end - mel_start; ++i ) {
            char interval;
            if( mel_file.get( interval ).fail() ) {
                std::cout << "ERROR: in mel file\n";
                break;
            }
            intervals[i] = static_cast< Raspoznavayka::interval_t >( interval );
        }
        CIDTag idtag( artist, album, name, std::atoi( year.c_str() ) );
        CInDBMelody new_melody( intervals, idtag );
        result.push_back( new_melody );
        id3_start = id3_end;
        mel_start = mel_end;
    } // end of found melodies' data read cycle

    id3_file.close();
    mel_file.close();
    index_file.close();
    return result;
}
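
getEverything() reads only start addresses from the index: each record's end is simply the next pair of start addresses read, after which id3_start and mel_start are shifted forward. The following sketch shows that consecutive-offsets pattern in isolation, with hypothetical byte offsets; it is not the project's index format, just an illustration of the idea.

#include <cstdint>
#include <iostream>
#include <vector>

int main() {
    // Hypothetical start offsets, in the order they would be read from the index file.
    std::vector< uint64_t > starts = { 0, 120, 305, 410 };

    // Record i spans [starts[i], starts[i+1]); the next start doubles as the current end.
    uint64_t begin = starts[0];
    for( std::size_t i = 1; i < starts.size(); ++i ) {
        uint64_t end = starts[i];
        std::cout << "record " << i - 1 << ": bytes [" << begin << ", " << end
                  << "), size " << end - begin << '\n';
        begin = end; // shift forward, just like id3_start = id3_end in the loop above
    }
    return 0;
}
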