Example #1
void LinearLiteralPropagator::propagate_impl(ReifiedLinearConstraint& rl)
{
    assert(conf_.propStrength>=2);
    const LinearConstraint& l = rl.l;
    assert(l.getRelation()==LinearConstraint::Relation::LE);

    //std::cout << "trying to propagate_impl " << l << std::endl;
    propClause_.clear();
    auto min = computeMin(l, propClause_);
    if (min>l.getRhs())
    {
        /// shrink conflict
        if (conf_.propStrength>=4)
        {
            //std::cout << "prop4" << std::endl;
            /// find a set of iterators such that their sum is minimally larger than rl.l.getRhs()
            auto& views = l.getViews();
            std::size_t index=0;
            while (index < views.size() && min-std::abs(views[index].a)>l.getRhs())
            {
                auto& i = views[index];
                auto wholeRange = vs_.getVariableStorage().getRestrictor(i);
                assert(wholeRange.size()>0);
                auto r = vs_.getVariableStorage().getCurrentRestrictor(i);
                int64 mmin = min - r.lower();
                //mm.second = minmax.second - r.upper();

                int64 up = l.getRhs() - mmin;

                if (up < wholeRange.lower()) // derive false
                {
                    propClause_[index] = wholeRange.begin(); // false lit
                    min = mmin + *(wholeRange.begin());
                }
                else
                {
                    assert(up < r.upper());
                    auto newUpper = order::wrap_upper_bound(wholeRange.begin(), r.end(), up);
                    //assert(newUpper != r.end()); /// should never be able to happen, as up < r.upper().first, so there is something which is smaller, this means we do not need r ?
                    propClause_[index] = newUpper;
                    min = mmin + *newUpper;
                }
                ++index;
            }
        }
        propClauses_.emplace_back(~rl.v,std::move(propClause_));
        return;
    }
}
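The propStrength>=4 branch shrinks the conflict clause one view at a time: with the contribution of all other views fixed at mmin, the remaining slack is up = rhs - mmin, and the reason literal is moved to the first domain value strictly greater than up. Below is a minimal standalone sketch of that bound search, assuming a plain sorted std::vector<int64_t> domain in place of the solver's restrictors (the names shrinkUpperBound and domain are illustrative, not part of the library):

#include <algorithm>
#include <cstdint>
#include <vector>

// Illustrative stand-in for the restrictor-based search above: given a sorted
// domain of a view and the remaining slack `up`, return an iterator to the
// first value strictly greater than `up` -- the value the shrunken reason
// clause points at (what order::wrap_upper_bound is used for in propagate_impl).
std::vector<int64_t>::const_iterator
shrinkUpperBound(const std::vector<int64_t>& domain, int64_t up)
{
    // std::upper_bound yields the first element > up, i.e. the smallest
    // domain value that still pushes the sum past the right-hand side.
    return std::upper_bound(domain.begin(), domain.end(), up);
}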
//****************************************************************************
void DatabaseInformation::computeGMM_PairObject_AllFeat(int nclusters) {

  if (TESTFLAG) {
    cout << "Inside DBInfo compute GMM PAIR O: start." << endl;
  }

  learnedModelPairObject.reserve(NOBJECTCLASSES);
  int numberOfFeat = FMPairObject[0].size();       // number of feature columns (currently 5); should be computed from the data
  int countFeat;

  // loop over reference object i
  for ( int i = 0 ; i < FMPairObject.size(); i++ ) {

    vector<cv::EM> internalEMvector;  // to work with vector of vector
    vector<vector<double> > meanNormalizationPair_currentRef;
    vector<vector<double> > stdNormalizationPair_currentRef;
    vector<vector<double> > mintmp;
    vector<vector<double> > maxtmp;

    // loop over target object j
    for ( int j = 0;  j < FMPairObject[i].size(); j++) {
    
      if (TESTFLAG) {
        cout << "Inside DBInfo compute GMM PAIR O: size of model is now: " << learnedModelPairObject.size() << " for object classes :   " << i << " and " << j << endl;
      }

      int numberOfScenes = FMPairObject[i][j].size();
      int countScene = 0;
      cv::Mat FeatMat = cv::Mat::zeros ( numberOfScenes, numberOfFeat,  CV_64F ); 
     
      for(vector<vector<FeatureInformation> >::iterator it = (FMPairObject[i][j]).begin(); it != (FMPairObject[i][j]).end(); ++it) {
        countFeat = 0;
        // for each feature of the current scene and belonging to current object
        for (vector<FeatureInformation>::iterator it2 = (*it).begin(); it2 != (*it).end(); ++it2) {
          FeatureInformation currentFeature = *it2;  
          vector<float> currentFeatureValues = currentFeature.getAllValues();  
          // depending on the dimensionality of that feature, add all of its values to the current row
          
          for ( int k = 0; k < currentFeatureValues.size() ; k++ ) {
            FeatMat.at<double>(countScene, countFeat) = (double) (currentFeatureValues.at(k));
            countFeat++;
          }
        }
        countScene++; 
      } 

      //*****************************************************************
      // NORMALIZATION of the feature matrix

      cv::Mat FeatMatreduced = FeatMat.colRange(0, 5);   // keep only the first 5 feature columns


      if (DEBUG) {cout << "Before normalization " << endl; }
      vector<double> meansVectorCurrentPair = computeMean(FeatMatreduced);
      vector<double> stdVectorCurrentPair = computeStd(FeatMatreduced, meansVectorCurrentPair);
      meanNormalizationPair_currentRef.push_back(meansVectorCurrentPair);
      stdNormalizationPair_currentRef.push_back(stdVectorCurrentPair);

      vector<double> maxVectorCurrentPair = computeMax(FeatMatreduced);  
      vector<double> minVectorCurrentPair = computeMin(FeatMatreduced);
      maxtmp.push_back(maxVectorCurrentPair);
      mintmp.push_back(minVectorCurrentPair);

      // compute weight based on STD of features
      double weight = computeStdWeights(FeatMatreduced, maxVectorCurrentPair, minVectorCurrentPair);

      cv::Mat normalizedFeatMat;
      if (NORMALIZEPAIR == 1) {
        normalizedFeatMat = doNormalization(FeatMatreduced, meansVectorCurrentPair, stdVectorCurrentPair);
      }
      else if (NORMALIZEPAIR == 2) {
        cout << "Before min-max normalization" << endl;
        normalizedFeatMat = doNormalizationMinMax(FeatMatreduced, maxVectorCurrentPair, minVectorCurrentPair);
      }
      else {
        normalizedFeatMat = FeatMatreduced.clone();
      }
      //****************************************************************

      if (DEBUG) {
        cout << endl << endl << "Object : " << i << "and " << j << endl << 
           "The feature matrix dim is " << normalizedFeatMat.size() << endl;
        cout << endl << endl << "The feature matrix N ROWS is " << normalizedFeatMat.rows << endl;
        cout << endl <<  "The features are " << endl <<  normalizedFeatMat << endl;
      }

      // Training the EM model for the current object pair.
      cv::EM em_model(nclusters);


      // Constraint: train the GMM model for the object pair features ONLY if the number of samples is sufficient.
      if (FeatMat.rows > 14) {

        if (TESTFLAG) { 
          std::cout << "Training the EM model for: "  << "Objects : " << i << " and " << j << endl << std::endl; 
        }
        em_model.train ( normalizedFeatMat );    
        if (DEBUG) { 
          std::cout << "Getting the parameters of the learned GMM model." << std::endl; 
        }
      }
      else {
          std::cout << "NOT Training the EM model for: "  << "Objects : " << i << " and " << j << endl << std::endl; 
      }
      internalEMvector.push_back(em_model);
    }
    learnedModelPairObject.push_back(internalEMvector); 
    meanNormalizationPair.push_back(meanNormalizationPair_currentRef);
    stdNormalizationPair.push_back(stdNormalizationPair_currentRef);
    minFeatPair.push_back(mintmp);
    maxFeatPair.push_back(maxtmp);
  }
}
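The helpers computeMean, computeStd and doNormalization are not shown in this example; below is a minimal sketch of the per-column z-score normalization they presumably implement, plus the sample-count guard used before training the GMM (OpenCV 2.4 cv::EM API; the helper names normalizeZScore and trainPairModel are illustrative, not from the original class):

#include <opencv2/core/core.hpp>
#include <opencv2/ml/ml.hpp>

// Per-column z-score normalization: (x - mean) / std, with a fallback for
// near-constant columns. Illustrative stand-in for doNormalization.
static cv::Mat normalizeZScore(const cv::Mat& feat)
{
  cv::Mat out = feat.clone();
  for (int c = 0; c < feat.cols; ++c) {
    cv::Scalar mean, stddev;
    cv::meanStdDev(feat.col(c), mean, stddev);
    double s = stddev[0] > 1e-12 ? stddev[0] : 1.0;
    cv::Mat normalized = (feat.col(c) - mean[0]) / s;
    normalized.copyTo(out.col(c));
  }
  return out;
}

// Train a GMM on the normalized features only when enough samples exist,
// mirroring the FeatMat.rows > 14 guard in computeGMM_PairObject_AllFeat.
static cv::EM trainPairModel(const cv::Mat& feat, int nclusters)
{
  cv::EM em_model(nclusters);
  if (feat.rows > 14) {
    em_model.train(normalizeZScore(feat));
  }
  return em_model;
}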