std::pair<utils::Point,utils::Point> CurvatureFeatureExtractor::backproject(core::DataSegment<float, 4> &normals, 
                     std::pair<utils::Point,utils::Point> &histoExtremalBins, std::vector<int> &surface, int w){
   std::vector<int> imgMinPoints=backprojectPointIDs(normals, histoExtremalBins.first, surface);
   std::vector<int> imgMaxPoints=backprojectPointIDs(normals, histoExtremalBins.second, surface);
   
   std::pair<utils::Point,utils::Point> imgMeans;
   imgMeans.first = computeMean(imgMinPoints, w);
   imgMeans.second = computeMean(imgMaxPoints, w);
   return imgMeans;
 }
// Normalize the values for all object pairs, then compute the standard deviation. // TODO
double DatabaseInformation::computeStdWeights(cv::Mat FeatMat, vector<double> maxvector, vector<double> minvector) {

  // the normalized feature matrix
  cv::Mat normalizedFeatMat = FeatMat.clone(); 
  //cv::Mat normalizedFeatMat = doNormalizationMinMax(FeatMat, maxvector, minvector);
  if (TESTFLAG) {
    cout << endl << FeatMat << endl;
  }
  // compute the STD on the normalized feature matrix
  vector<double> meansVector = computeMean(normalizedFeatMat);
  vector<double> stdVector = computeStd(normalizedFeatMat, meansVector);
  // compute a WEIGHT for the considered OBJECT-PAIR (used in the scene similarity score) based on the std
  double magnitude = 1;
  for (size_t i = 0; i < stdVector.size(); i++) {
    if ( i != 2 ) {   // feature index 2 is excluded from the weight
      magnitude = magnitude * (stdVector[i]);
      if (TESTFLAG) {
        cout << "in DatabaseInformation::computeStdWeights. std vector: " << (stdVector[i]) << endl;
      }
    }
  }
  double out = 1 / magnitude;
  cout << "in DatabaseInformation::computeStdWeights. Final: " << out << endl;
  return out;

}
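A quick worked example of the weighting above (hypothetical std values, not taken from the source):

// Suppose computeStd returns {0.5, 0.2, 0.9, 0.1}. Index 2 is excluded,
// so magnitude = 0.5 * 0.2 * 0.1 = 0.01 and the returned weight is
// 1 / 0.01 = 100: object pairs whose features vary little across the
// training scenes receive a large weight in the scene similarity score.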
// Compute standard deviation
double computeStdDev(double data[], int length){
  double sum = 0.0;
  double mean = computeMean(data, length);
  for(int i = 0; i < length; ++i){
    sum = sum + pow((data[i]-mean), 2);
  }
  return sqrt(sum/length);
}
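A minimal usage sketch for the helper above (it assumes computeStdDev and its computeMean helper are declared in scope):

#include <cstdio>

int main() {
  double samples[] = {2.0, 4.0, 4.0, 4.0, 5.0, 5.0, 7.0, 9.0};
  // Population standard deviation (divides by length, as above).
  // Mean is 5, sum of squared deviations is 32, so the result is sqrt(32/8) = 2.
  double sd = computeStdDev(samples, 8);
  std::printf("stddev = %f\n", sd);
  return 0;
}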
Example #4
/*!

  Perform the Hinkley test. A downward jump is detected if \f$ M_k - S_k >
  \alpha \f$. An upward jump is detected if \f$ T_k - N_k > \alpha \f$.

  \param signal : Observed signal \f$ s(t) \f$.

  \sa setDelta(), setAlpha(), testDownwardJump(), testUpwardJump()

*/
vpHinkley::vpHinkleyJumpType vpHinkley::testDownUpwardJump(double signal)
{

  vpHinkleyJumpType jump = noJump;

  nsignal ++; // Signal length

  if (nsignal == 1) mean = signal;

  // Compute the cumulative variables
  computeSk(signal);
  computeTk(signal);

  computeMk();
  computeNk();

  vpCDEBUG(2) << "alpha: " << alpha << " dmin2: " << dmin2
	      << " signal: " << signal
	      << " Sk: " << Sk << " Mk: " << Mk
	      << " Tk: " << Tk << " Nk: " << Nk << std::endl;

  // Test whether the cumulative variables exceed the threshold
  if ((Mk - Sk) > alpha)
    jump = downwardJump;
  else if ((Tk - Nk) > alpha)
    jump = upwardJump;

#ifdef VP_DEBUG
  if (VP_DEBUG_MODE >= 2) {
    switch(jump) {
    case noJump:
      std::cout << "noJump " << std::endl;
      break;
    case downwardJump:
      std::cout << "downwardJump " << std::endl;
      break;
    case upwardJump:
      std::cout << "upwardJump " << std::endl;
      break;
    }
  }
#endif

  computeMean(signal);

  if ((jump == upwardJump) || (jump == downwardJump)) {
    vpCDEBUG(2) << "\n*** Reset the Hinkley test  ***\n";

    Sk = 0; Mk = 0; Tk = 0; Nk = 0;  nsignal = 0;
    // Begin modification FS 03/09/2003
    mean = signal;
    // End modification FS 03/09/2003
  }

  return (jump);
}
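A minimal driver for the two-sided test above (a sketch; it assumes the ViSP vpHinkley class with the setAlpha()/setDelta() setters referenced in the documentation block, and some std::vector<double> signal of observations):

vpHinkley hinkley;
hinkley.setAlpha(0.2); // jump detection threshold
hinkley.setDelta(0.1); // expected minimal jump amplitude
for (double s : signal) {
  vpHinkley::vpHinkleyJumpType jump = hinkley.testDownUpwardJump(s);
  if (jump != vpHinkley::noJump) {
    // a mean jump was detected; the cumulative sums were reset internally
  }
}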
    float StatisticalStandardMeasures::computeCovariance(const float* data1, const float* data2, size_t numberOfElements, bool unbiasedEstimate)
    {
        float covarianceValue = 0.0f;
        float meanValueData1 = computeMean(data1, numberOfElements);
        float meanValueData2 = computeMean(data2, numberOfElements);

        for (size_t i = 0; i < numberOfElements; i++)
        {
            covarianceValue += (data1[i] - meanValueData1) * (data2[i] - meanValueData2);
        }

        if (unbiasedEstimate)
        {
            return (covarianceValue / (numberOfElements - 1));
        }
        else
        {
            return (covarianceValue / numberOfElements);
        }
    }
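A small usage sketch with hypothetical data (assuming the method is callable as below; the expected values are worked out in the comments):

const float x[] = {1.0f, 2.0f, 3.0f, 4.0f};
const float y[] = {2.0f, 4.0f, 6.0f, 8.0f};
// Means are 2.5 and 5.0; the sum of products of deviations is
// (-1.5)(-3) + (-0.5)(-1) + (0.5)(1) + (1.5)(3) = 10.
float biased   = StatisticalStandardMeasures::computeCovariance(x, y, 4, false); // 10/4 = 2.5
float unbiased = StatisticalStandardMeasures::computeCovariance(x, y, 4, true);  // 10/3 ~ 3.33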
Example #6
double computeCov(double* dfFtrs, double *dfSc, int f, int N){
    double cov, *x;
    
    x = (double*) mxCalloc( N, sizeof(double) );
    for (int i = 0; i < N; i++){
        x[i] = dfFtrs[i+f*N] * dfSc[i];
    }
    // Mean of the elementwise product: this equals the covariance only when
    // both inputs are zero-mean (E[xy] = cov(x,y) if E[x] = E[y] = 0).
    cov = computeMean(x,N);
    mxFree(x);
    return cov;
}
Example #7
 ReturnCode computeMeanSigma(Result &mean, Result &sigma, const Values &values)
 {
     MSS_BEGIN(ReturnCode);
     MSS(values.size() >= 2);
     MSS(computeMean(mean, values));
     sigma = 0.0;
     for (auto value: values)
         sigma += (value-mean)*(value-mean);
     sigma /= values.size()-1;
     sigma = ::sqrt(sigma);
     MSS_END();
 }
Example #8
void BGPattern::maskImage(cv::Mat src, cv::Mat dst)
{
    // accept only char type matrices
    assert(src.depth() == CV_8U && dst.depth() == CV_8U);
    assert(src.channels() == 3 && dst.channels() == 1);
    assert(src.rows == dst.rows && src.cols == dst.cols);
    
    int rows = src.rows, cols = src.cols;
    int npix = rows * cols;
    uchar *src_data = (uchar*)src.data;
    uchar *dst_data = (uchar*)dst.data;
    
    
    double val0[3] = {m_curr[0], m_curr[1], m_curr[2]};
    // find new centroid
    reset();
    /*
    for (unsigned int i = 0; i < npix; ++i )
    {
//        std::cout << (int)src_data[0] << ", " << (int)src_data[1] << ", " << (int)src_data[2] << " " << addPixel(src_data)<< std::endl;
        
        addPixel(src_data);
        
        // move pointers
        src_data += 3;
    }
    computeMean();
    
    src_data = (uchar*)src.data; // reset pointer to beginning of image
    */
    
    for (int i = 0; i < npix; ++i )
    {
        if (dist(src_data) < m_dist_th)
            dst_data[0] = 0; // 1 keeps background; 0 keeps objects
        else
            dst_data[0] = 1;
        
        // move pointers
        src_data += 3;
        dst_data += 1;
    }
    computeMean(); // for next frame...
//    std::cout << "center moved: " << dist(val0) << std::endl;
    
}
Example #9
/*!

  Perform the Hinkley test. A downward jump is detected if
  \f$ M_k - S_k > \alpha \f$.

  \param signal : Observed signal \f$ s(t) \f$.

  \sa setDelta(), setAlpha(), testUpwardJump()

*/
vpHinkley::vpHinkleyJumpType vpHinkley::testDownwardJump(double signal)
{

  vpHinkleyJumpType jump = noJump;

  nsignal ++; // Signal length

  if (nsignal == 1) mean = signal;

  // Compute the cumulative variables
  computeSk(signal);

  computeMk();

  vpCDEBUG(2) << "alpha: " << alpha << " dmin2: " << dmin2
	    << " signal: " << signal << " Sk: " << Sk << " Mk: " << Mk;

  // Test whether the cumulative variables exceed the threshold
  if ((Mk - Sk) > alpha)
    jump = downwardJump;

#ifdef VP_DEBUG
  if (VP_DEBUG_MODE >= 2) {
    switch(jump) {
    case noJump:
      std::cout << "noJump " << std::endl;
      break;
    case downwardJump:
      std::cout << "downwardJump " << std::endl;
      break;
    case upwardJump:
      std::cout << "upwardJump " << std::endl;
      break;
    }
  }
#endif

  computeMean(signal);

  if (jump == downwardJump)  {
    vpCDEBUG(2) << "\n*** Reset the Hinkley test  ***\n";

    Sk = 0; Mk = 0;  nsignal = 0;
  }

  return (jump);
}
Example #10
/*!

  Perform the Hinkley test. An upward jump is detected if \f$ T_k - N_k >
  \alpha \f$.

  \param signal : Observed signal \f$ s(t) \f$.

  \sa setDelta(), setAlpha(), testDownwardJump()

*/
vpHinkley::vpHinkleyJumpType vpHinkley::testUpwardJump(double signal)
{

  vpHinkleyJumpType jump = noJump;

  nsignal ++; // Signal length

  if (nsignal == 1) mean = signal;

  // Compute the cumulative variables
  computeTk(signal);

  computeNk();

  vpCDEBUG(2) << "alpha: " << alpha << " dmin2: " << dmin2
	    << " signal: " << signal << " Tk: " << Tk << " Nk: " << Nk;

  // Test whether the cumulative variables exceed the threshold
  if ((Tk - Nk) > alpha)
    jump = upwardJump;

#ifdef VP_DEBUG
  if (VP_DEBUG_MODE >= 2) {
    switch(jump) {
    case noJump:
      std::cout << "noJump " << std::endl;
      break;
    case downwardJump:
      std::cout << "downwardJump " << std::endl;
      break;
    case upwardJump:
      std::cout << "upwardJump " << std::endl;
      break;
    }
  }
#endif
  computeMean(signal);

  if (jump == upwardJump)  {
    vpCDEBUG(2) << "\n*** Reset the Hinkley test  ***\n";

    Tk = 0; Nk = 0;  nsignal = 0;
  }

  return (jump);
}
Example #11
void Correlation::computeCovariance()
{
  if(nbsample==0)
    {
      cerr << "ERROR: in Correlation::computeCovariance, chain size is 0, exit\n";
      cerr.flush();
      exit(0);
    } else {
      createCovarianceBuffer();
      computeMean();

      for(int t=0;t<nbsample/2;t++)
	{
	  for(int j=0;j<nbparameter;j++)
	    {
	      for(int i=0;i<nbsample-t;i++)
		{
		  covparam[j][t]+=(parameters[j][i]-meanparam[j])*(parameters[j][i+t]-meanparam[j]);
		}
	      if(isConstant[j]==Yes)
		covparam[j][t]=0;
	    }
	  if(t==0)
	    {
	      for(int j=0;j<nbparameter;j++)
		{
		  variance[j]=covparam[j][0]/(double)(nbsample-1);
		}
	    } 
	  for(int j=0;j<nbparameter;j++)
	    {
	      covparam[j][t]/=(double)(nbsample);
	      // normalisation
	      if(covparam[j][0]!=0)
		covnorm[j][t]=covparam[j][t]/covparam[j][0];
	      else
		covnorm[j][t]=0;
	    }
	} // end for t

    }
}
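In the loop above, covparam[j][t] accumulates the lag-t autocovariance of parameter j, c_j(t) = (1/N) * sum_i (x_{j,i} - mean_j)(x_{j,i+t} - mean_j), and covnorm[j][t] = c_j(t) / c_j(0) is the normalized autocorrelation, the quantity usually inspected to judge how well an MCMC chain is mixing.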
    float StatisticalStandardMeasures::computeVariance(const float* data, size_t numberOfElements, bool unbiasedEstimate)
    {
        float varianceValue = 0.0f;
        float meanValue = computeMean(data, numberOfElements);

        for (size_t i = 0; i < numberOfElements; i++)
        {
            varianceValue += (data[i] - meanValue) * (data[i] - meanValue);
        }

        if (unbiasedEstimate)
        {
            varianceValue /= (numberOfElements - 1);
        }
        else
        {
            varianceValue /= numberOfElements;
        }

        return varianceValue;
    }
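For reference, the variance is the covariance of a signal with itself, so the two helpers agree on the same data (a sketch with hypothetical values):

const float data[] = {1.0f, 2.0f, 3.0f, 4.0f};
// Mean is 2.5; sum of squared deviations is 5, so the unbiased estimate is 5/3.
float var = StatisticalStandardMeasures::computeVariance(data, 4, true);
float cov = StatisticalStandardMeasures::computeCovariance(data, data, 4, true); // same value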
Example #13
void
Delta_vv_t::hybridFit(const std::vector<double> & subData,
                      const std::vector<double> & subSensor)
{
    Logger * log = Logger::get_instance();

    log->debug(boost::format("Entering function %1%")
              % __PRETTY_FUNCTION__);
    log->increase_indent();

    // These constants are used to flag "bad" data. Note: the names say
    // "stdDev", but the values come from computeVariance; if that helper
    // returns a variance rather than a standard deviation, the +/-5 sigma
    // cuts below operate on a mis-scaled quantity.
    const double stdDevData = computeVariance(subData);
    const double meanSubData = computeMean(subData);
    const double stdDevSensor = computeVariance(subSensor);
    const double meanSubSensor = computeMean(subSensor);

    log->debug(boost::format("stdDevData = %1%, meanSubData = %2%, "
                             "stdDevSensor = %3%, meanSubSensor = %4%")
               % stdDevData
               % meanSubData
               % stdDevSensor
               % meanSubSensor);

    // Mask missing pid
    log->debug("Going to mask those pIDs for which gains are too «strange»");
    log->increase_indent();

    std::vector<double> locData;
    std::vector<double> locGains;
    std::vector<double> locDipole;
    std::vector<double> locSensor;
    for (unsigned int idx=0; idx<pid.size(); ++idx) {
        if ((gain[idx]==0)||(std::isinf(gain[idx]))
            ||(std::isnan(subSensor[idx]))||(subSensor[idx]==0)
            ||(subSensor[idx]>(meanSubSensor+5*stdDevSensor))||(subSensor[idx]<(meanSubSensor-5*stdDevSensor))
            ||(std::isnan(subData[idx]))||(subData[idx]==0)
            ||(subData[idx]>(meanSubData+5*stdDevData))||(subData[idx]<(meanSubData-5*stdDevData)))
        {
            log->debug(boost::format("Skipping pid %1%")
                       % pid[idx]);
            continue;
        }

        locData.push_back(subData[idx]);
        locGains.push_back(gain[idx]);
        locDipole.push_back(dipole[idx]);
        locSensor.push_back(subSensor[idx]);
    }
    log->decrease_indent();
    log->debug(boost::format("Done, %1% elements kept for the fit")
               % locData.size());

    double meanSensor = computeMean(locSensor);

    // Multi-parameters fitting
    // Prepare data
    gsl_matrix *data, *cov;
    gsl_vector *loadVolt, *weights, *coeff;

    data = gsl_matrix_alloc (locData.size(), 2);
    loadVolt = gsl_vector_alloc (locData.size());
    weights = gsl_vector_alloc (locData.size());

    coeff = gsl_vector_alloc (2);
    cov = gsl_matrix_alloc (2, 2);

    for(size_t idx = 0; idx < locData.size(); idx++) {
        gsl_matrix_set (data, idx, 0, 1.0);
        gsl_matrix_set (data, idx, 1, locSensor[idx] - meanSensor);

        gsl_vector_set (loadVolt, idx, locData[idx] * locGains[idx]);
        gsl_vector_set (weights, idx, std::fabs(locDipole[idx]));
    }

    // Fit
    double chisq;
    gsl_multifit_linear_workspace * work =
        gsl_multifit_linear_alloc (locData.size(), 2);
    gsl_multifit_wlinear (data, weights, loadVolt, coeff, cov,
                          &chisq, work);
    gsl_multifit_linear_free (work);

    log->debug(boost::format("Interpolation coefficients: c_0 = %1%, "
                             "c_1 = %2%, \u03c7\u00b2 = %3%")
               % gsl_vector_get(coeff, 0)
               % gsl_vector_get(coeff, 1)
               % chisq);

    // Save results
    std::vector<double> returnGains;
    std::vector<int> returnPids;
    for (unsigned int idx=0; idx<pid.size(); ++idx) {
        if ((gain[idx]==0)||(std::isinf(gain[idx]))
            ||(std::isnan(subSensor[idx]))||(subSensor[idx]==0)
            ||(subSensor[idx]>(meanSubSensor+5*stdDevSensor))||(subSensor[idx]<(meanSubSensor-5*stdDevSensor))
            ||(std::isnan(subData[idx]))||(subData[idx]==0)
            ||(subData[idx]>(meanSubData+5*stdDevData))||(subData[idx]<(meanSubData-5*stdDevData)))
            continue;

        returnGains.push_back((gsl_vector_get(coeff,0)+gsl_vector_get(coeff,1)*(subSensor[idx]-meanSensor))/subData[idx]);
        returnPids.push_back(pid[idx]);
    }

    // Memory free
    gsl_matrix_free (data);
    gsl_vector_free (loadVolt);
    gsl_vector_free (weights);
    gsl_vector_free (coeff);
    gsl_matrix_free (cov);

    gain.swap(returnGains);
    pid.swap(returnPids);

    log->decrease_indent();
    log->debug(boost::format("Quitting function %1%")
              % __PRETTY_FUNCTION__);
}
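The weighted fit above models the load voltage as loadVolt ~= c_0 + c_1 * (sensor - meanSensor), with |dipole| as the per-sample weight; each retained pid then receives the gain (c_0 + c_1 * (subSensor[idx] - meanSensor)) / subData[idx], i.e. the fitted voltage divided by the raw datum.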
Example #14
void updateBoundaryWater(double deltaT)
{
    double boundaryPsi, boundarySe, boundaryK, meanK;
    double const EPSILON_mm = 0.0001;          //0.1 mm
    double area, boundarySide, boundaryArea, Hs, avgH, maxFlow, flow;

    for (long i = 0; i < myStructure.nrNodes; i++)
    {
        /*! extern sink/source */
        myNode[i].Qw = myNode[i].waterSinkSource;

        if (myNode[i].boundary != NULL)
        {
            /*! initialize */
            myNode[i].boundary->waterFlow = 0.;
            if (myNode[i].boundary->type == BOUNDARY_RUNOFF)
            {
                /*! current surface water available to runoff [m] */
                avgH = (myNode[i].H + myNode[i].oldH) * 0.5;
                Hs = maxValue(avgH - (myNode[i].z + myNode[i].Soil->Pond), 0.0);
                if (Hs > EPSILON_mm)
                {
                    area = myNode[i].volume_area;       /*!<  [m^2] (surface)  */
                    boundarySide = sqrt(area);          /*!<  [m] approximation: side = sqrt(area)  */
                    maxFlow = (Hs * area) / deltaT;     /*!<  [m^3 s^-1] max available flow in time step  */
                    boundaryArea = boundarySide * Hs;   /*!<  [m^2]  */
                    /*! [m^3 s^-1] Manning */
                    flow = boundaryArea *(pow(Hs, (2./3.)) / myNode[i].Soil->Roughness) * sqrt(myNode[i].boundary->slope);
                    myNode[i].boundary->waterFlow = -minValue(flow, maxFlow);
                }
            }
            else if (myNode[i].boundary->type == BOUNDARY_FREEDRAINAGE)
            {
                /*! [m^3 s^-1] Darcy unit gradient */
                /*! dH=dz=L  ->  q=K(h) */
                myNode[i].boundary->waterFlow = -myNode[i].k * myNode[i].up.area;
            }

            else if (myNode[i].boundary->type == BOUNDARY_FREELATERALDRAINAGE)
            {
                // TODO approximation: boundary area assumed equal to the other lateral link
                area = myNode[i].lateral[0].area;
                /*! [m^3 s^-1] Darcy,  gradient = slope (dH=dz) */
                myNode[i].boundary->waterFlow = -myNode[i].k * area * myNode[i].boundary->slope
                                            * myParameters.k_lateral_vertical_ratio;
            }

            else if (myNode[i].boundary->type == BOUNDARY_PRESCRIBEDTOTALPOTENTIAL)
            {
                if (myNode[i].boundary->prescribedTotalPotential >= myNode[i].z)
                    boundaryK = myNode[i].Soil->K_sat;
                else
                {
                    boundaryPsi = fabs(myNode[i].boundary->prescribedTotalPotential - myNode[i].z);
                    boundarySe = computeSefromPsi(boundaryPsi, myNode[i].Soil);
                    boundaryK = computeWaterConductivity(boundarySe, myNode[i].Soil);
                }
                meanK = computeMean(myNode[i].k, boundaryK);
                myNode[i].boundary->waterFlow = meanK * (myNode[i].boundary->prescribedTotalPotential - myNode[i].H) * myNode[i].up.area;
            }

            myNode[i].Qw += myNode[i].boundary->waterFlow;
        }
    }
}
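In the BOUNDARY_RUNOFF branch above, the flow is Manning's equation, flow = boundaryArea * (Hs^(2/3) / Roughness) * sqrt(slope) [m^3 s^-1], with boundaryArea = boundarySide * Hs the wetted cross-section, and the result is capped at maxFlow, the volume of surface water actually available during the time step.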
//****************************************************************************
void DatabaseInformation::computeGMM_SingleObject_AllFeat(int nclusters) {
  int fsize = 9;        // TODO: compute it
  int countFeat;
  if (DEBUG) {
    cout << endl << endl << "Starting Compute GMM for Single Objects / All Feats " << endl << endl;
  }

  // for each considered object class ("i" is also the object class ID as stored in Object-> actualObjectID)
  for ( int i = 0 ; i < FMSingleObject.size(); i++ ) {

    if (TESTFLAG)  {cout << "Current object :  " << i << endl; }
    
    // initialize the feature matrix "FeatMat" for the current object class, as a cv::Mat object
    cv::Mat FeatMat = cv::Mat::zeros ( FMSingleObject.at(i).size(), fsize,  CV_64F ); 

    int countScene = 0;
 
    // for each scene in the training database
    for(vector<vector<FeatureInformation> >::iterator it = (FMSingleObject.at(i)).begin(); it != (FMSingleObject.at(i)).end(); ++it) {
      
      // (*it) is a vector of FI
      countFeat = 0;
      // for each feature of the current scene - and belonging to current object class
      for (vector<FeatureInformation>::iterator it2 = (*it).begin(); it2 != (*it).end(); ++it2) {
        FeatureInformation currentFeature = *it2;  
        vector<float> currentFeatureValues = currentFeature.getAllValues();  
        
        // depending on the dimensionality of the feature, append all of its values to the current row
        for ( int j = 0; j < currentFeatureValues.size() ; j++ ) {
          FeatMat.at<double>(countScene, countFeat) = (double) (currentFeatureValues.at(j));
          countFeat++;
        }
      }
      countScene++; 
    }

    // obtain "FeatMat" : <numberOfScenes x numberOfFeatures> (meaning 1-D features)

    // **********************************************************************
    // // // NORMALIZATION
    // //    Normalize the feature matrix "FeatMat": 
    
    vector<double> meansVectorCurrentObject = computeMean(FeatMat);
    vector<double> stdVectorCurrentObject = computeStd(FeatMat, meansVectorCurrentObject);

    //cv::Mat normalizedFeatMat = doNormalization(FeatMat, meansVectorCurrentObject, stdVectorCurrentObject);

    meanNormalization.push_back(meansVectorCurrentObject);
    stdNormalization.push_back(stdVectorCurrentObject);

    cv::Mat normalizedFeatMat = FeatMat.clone();

    /* // old version
    cv::Mat normalizedFeatMat = FeatMat.clone();
    vector<double> currentObjectMean;
    vector<double> currentObjectStd;
    // for each column i.e. each 1-D feature
    for ( int icolumn = 0 ; icolumn < FeatMat.cols; icolumn++ ) {
      cv::Mat currentCol = FeatMat.col(icolumn);
      double s = 0;
      double s_std = 0;
      for (int c = 0; c < currentCol.rows ; c++ ) {
        s += currentCol.at<double>(c);
      }
      // compute mean
      double _mean = s / currentCol.rows; 
      // compute std
      for (int c = 0; c < currentCol.rows ; c++ ) {
        s_std += pow( ( currentCol.at<double>(c) - _mean) , 2);
      }   
      double _std =sqrt(s_std / currentCol.rows);   
      if (_std == 0) { _std = 1; }
      // store the mean and std values for the current object class and the current feature
      currentObjectMean.push_back(_mean);
      currentObjectStd.push_back(_std);
      // normalize the current column (i.e. feature) of the feature matrix
      for (int c = 0 ; c < currentCol.rows ; c++ ) {
        normalizedFeatMat.at<double>(c, icolumn) = (currentCol.at<double>(c) - _mean) / (_std );
      }
    }
    // store all the mean and std values for all the different features, for the considered object class.
    meanNormalization.push_back(currentObjectMean);
    stdNormalization.push_back(currentObjectStd);
    */

    // *************************************************************************
    // // end NORMALIZATION feature matrix.
    // **********************************************************************

    // Test for feature relevance experiments ->
    // test: select lower dimensionality of feature matrix   
  
    cv::Mat featsTrain = normalizedFeatMat.colRange(0, 9);    
    /*
    cv::Mat featsTrain = cv::Mat(normalizedFeatMat.rows, 4, CV_64F);   
    normalizedFeatMat.col(0).copyTo(featsTrain.col(0));
    normalizedFeatMat.col(1).copyTo(featsTrain.col(1));
    normalizedFeatMat.col(3).copyTo(featsTrain.col(2));
    normalizedFeatMat.col(4).copyTo(featsTrain.col(3));
    */

    if (DEBUG) {
      cout << endl << endl << "Object : " << i << endl << 
         "The feature matrix dim is " << FeatMat.size() << endl;
      cout << endl << endl << "The feature matrix N ROWS is " << FeatMat.rows << endl;
      cout << endl <<  "The features are " << endl <<  FeatMat << endl;
    }
    // END test lower feature dimensionality 
    // **********************************************************************

    //  Training EM model for the current object.  
    cv::EM em_model(nclusters);
    if (DEBUG) { 
      std::cout << "Training the EM model." << std::endl; 
    }
    em_model.train ( featsTrain );    // normalizedFeatMat to change
    if (DEBUG) { 
      std::cout << "Getting the parameters of the learned GMM model." << std::endl; 
    }
    learnedModelSingleObject.push_back(em_model);


    // **************************************************************************
    // testing on the same training database:

    cv::Mat _means = em_model.get<cv::Mat>("means");
    cv::Mat _weights = em_model.get<cv::Mat> ("weights");
    vector<cv::Mat> _covs = em_model.get<vector<cv::Mat> > ("covs");

    double minProb = 1000;
    for (int zz = 0; zz < featsTrain.rows; zz++) {
      cv::Mat featsTrainScene = featsTrain.row(zz);
      double prob = computeGMMProbability(featsTrainScene, _means, _covs, _weights);
      prob = log(prob);
      if (prob < minProb) {
        minProb = prob;
      }
      // cout << "Model  " << i << " prob " << prob << endl;
    }
    cout << "Model  " << i << "  minprob  " << minProb << endl << endl;

    thresholds.push_back(minProb);

    // **************************************************************************

    if (DEBUG) {
      cout << "Inside DBInfo compute GMM SO: size of model is now: " << learnedModelSingleObject.size() << endl;
    }


    if (DEBUG) { 
     
       std::cout << "The size of the means is:  " << _means.size()  <<
       "  and weights : " << _weights.size()  << std::endl << 
       std::endl;
       cout << "The mean matrix of current GMM model is : "  << _means << endl;
       cout << "The weights are : " << _weights << endl;
      
    }
  }  
}
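The per-scene score used in the test loop above is the log of the GMM density, p(x) = sum_k weights_k * N(x | means_k, covs_k); the minimum log-probability over the training scenes is stored in thresholds as a per-class acceptance threshold for later scene evaluation.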
 float StatisticalStandardMeasures::computeMean(const std::vector<float> &data)
 {
     // data.data() is valid even for an empty vector, unlike &data[0]
     return computeMean(data.data(), (unsigned int)data.size());
 }
Example #17
// An implementation of the Pyramidal Lucas-Kanade Optical Flow algorithm.
// See http://robots.stanford.edu/cs223b04/algo_tracking.pdf for details.
bool OpticalFlow::findFlowAtPoint(const float32 u_x, const float32 u_y,
                                  float32* final_x, float32* final_y) const {
  const float32 threshold_squared = square(THRESHOLD);

  // Initial guess.
  float32 g_x = 0.0f;
  float32 g_y = 0.0f;

  // For every level in the pyramid, update the coordinates of the best match.
  for (int32 l = NUM_LEVELS - 1; l >= 0; --l) {
    // Shrink factor from original.
    const int32 shrink_factor = (1 << l);

    // Images I (prev) and J (next).
    const Image<uint8>& img_I = *frame1_->pyramid_[l];
    const Image<uint8>& img_J = *frame2_->pyramid_[l];

    // Computed gradients.
    const Image<int32>& I_x = *frame1_->spatial_x_[l];
    const Image<int32>& I_y = *frame1_->spatial_y_[l];

    // Image position vector (p := u^l), scaled for this level.
    const float32 p_x = u_x / static_cast<float32>(shrink_factor);
    const float32 p_y = u_y / static_cast<float32>(shrink_factor);

    // LOGV("Level %d: (%d, %d) / %d -> (%d, %d)",
    //      l, u_x, u_y, shrink_factor, p_x, p_y);

    // Get values for frame 1.  They remain constant through the inner
    // iteration loop.
    float32 vals_I[ARRAY_SIZE];
    float32 vals_I_x[ARRAY_SIZE];
    float32 vals_I_y[ARRAY_SIZE];

    int32 val_idx = 0;
    for (int32 win_x = -WINDOW_SIZE; win_x <= WINDOW_SIZE; ++win_x) {
      for (int32 win_y = -WINDOW_SIZE; win_y <= WINDOW_SIZE; ++win_y) {
        const float32 x_pos = p_x + win_x;
        const float32 y_pos = p_y + win_y;

        if (!img_I.validInterpPixel(x_pos, y_pos)) {
          return false;
        }

        vals_I[val_idx] = img_I.getPixelInterp(x_pos, y_pos);

        vals_I_x[val_idx] = I_x.getPixelInterp(x_pos, y_pos);
        vals_I_y[val_idx] = I_y.getPixelInterp(x_pos, y_pos);

        ++val_idx;
      }
    }

    // Compute the spatial gradient matrix about point p.
    float32 G[] = { 0, 0, 0, 0 };
    calculateG(vals_I_x, vals_I_y, ARRAY_SIZE, G);

    // Find the inverse of G.
    float32 G_inv[4];
    if (!invert2x2(G, G_inv)) {
      // If we can't invert, hope that the next level will have better luck.
      continue;
    }

#ifdef NORMALIZE
    const float32 mean_I = computeMean(vals_I, ARRAY_SIZE);
    const float32 std_dev_I = computeStdDev(vals_I, ARRAY_SIZE, mean_I);
#endif

    // Iterate NUM_ITERATIONS times or until we converge.
    for (int32 iteration = 0; iteration < NUM_ITERATIONS; ++iteration) {
      // Get values for frame 2.
      float32 vals_J[ARRAY_SIZE];
      int32 val_idx = 0;
      for (int32 win_x = -WINDOW_SIZE; win_x <= WINDOW_SIZE; ++win_x) {
        for (int32 win_y = -WINDOW_SIZE; win_y <= WINDOW_SIZE; ++win_y) {
          const float32 x_pos = p_x + win_x + g_x;
          const float32 y_pos = p_y + win_y + g_y;

          if (!img_I.validInterpPixel(x_pos, y_pos)) {
            return false;
          }

          vals_J[val_idx] = img_J.getPixelInterp(x_pos, y_pos);

          ++val_idx;
        }
      }

#ifdef NORMALIZE
      const float32 mean_J = computeMean(vals_J, ARRAY_SIZE);
      const float32 std_dev_J = computeStdDev(vals_J, ARRAY_SIZE, mean_J);

      const float32 std_dev_ratio = std_dev_I / std_dev_J;
#endif

      // Compute image mismatch vector.
      float32 b_x = 0.0f;
      float32 b_y = 0.0f;
      val_idx = 0;
      for (int32 win_x = -WINDOW_SIZE; win_x <= WINDOW_SIZE; ++win_x) {
        for (int32 win_y = -WINDOW_SIZE; win_y <= WINDOW_SIZE; ++win_y) {
          // Normalized Image difference.

#ifdef NORMALIZE
          const float32 dI = (vals_I[val_idx] - mean_I) -
                             (vals_J[val_idx] - mean_J) * std_dev_ratio;
#else
          const float32 dI = vals_I[val_idx] - vals_J[val_idx];
#endif

          b_x += dI * vals_I_x[val_idx];
          b_y += dI * vals_I_y[val_idx];

          ++val_idx;
        }
      }

      // Optical flow... solve n = G^-1 * b
      const float32 n_x = (G_inv[0] * b_x) + (G_inv[1] * b_y);
      const float32 n_y = (G_inv[2] * b_x) + (G_inv[3] * b_y);

      // Update best guess with residual displacement from this level and
      // iteration.
      g_x += n_x;
      g_y += n_y;

      // LOGV("Iteration %d: delta (%.3f, %.3f)", iteration, n_x, n_y);

      // Abort early if we're already below the threshold.
      if (square(n_x) + square(n_y) < threshold_squared) {
        break;
      }
    }  // Iteration.

    if (l > 0) {
      // Every lower level of the pyramid is 2x as large dimensionally.
      g_x = 2.0f * g_x;
      g_y = 2.0f * g_y;
    }
  }  // Level.

  // LOGV("Final displacement for feature %d was (%.2f, %.2f)",
  //      iFeat, g_x, g_y);

  *final_x = u_x + g_x;
  *final_y = u_y + g_y;

  // Return the best guess if we're still in the image.
  return frame1_->pyramid_[0]->validInterpPixel(*final_x, *final_y);
}
//****************************************************************************
void DatabaseInformation::computeGMM_PairObject_AllFeat(int nclusters) {

  if (TESTFLAG) {
    cout << "Inside DBInfo compute GMM PAIR O: start." << endl;
  }

  learnedModelPairObject.reserve(NOBJECTCLASSES);
  int numberOfFeat = FMPairObject[0].size();       // 5; compute it
  int countFeat;

  // loop over reference object i
  for ( int i = 0 ; i < FMPairObject.size(); i++ ) {

    vector<cv::EM> internalEMvector;  // to work with vector of vector
    vector<vector<double> > meanNormalizationPair_currentRef;
    vector<vector<double> > stdNormalizationPair_currentRef;
    vector<vector<double> > mintmp;
    vector<vector<double> > maxtmp;

    // loop over target object j
    for ( int j = 0;  j < FMPairObject[i].size(); j++) {
    
      if (TESTFLAG) {
        cout << "Inside DBInfo compute GMM PAIR O: size of model is now: " << learnedModelPairObject.size() << " for object classes :   " << i << " and " << j << endl;
      }

      int numberOfScenes = FMPairObject[i][j].size();
      int countScene = 0;
      cv::Mat FeatMat = cv::Mat::zeros ( numberOfScenes, numberOfFeat,  CV_64F ); 
     
      for(vector<vector<FeatureInformation> >::iterator it = (FMPairObject[i][j]).begin(); it != (FMPairObject[i][j]).end(); ++it) {
        countFeat = 0;
        // for each feature of the current scene and belonging to current object
        for (vector<FeatureInformation>::iterator it2 = (*it).begin(); it2 != (*it).end(); ++it2) {
          FeatureInformation currentFeature = *it2;  
          vector<float> currentFeatureValues = currentFeature.getAllValues();  
          // depending on the dimensionality of the feature, append all of its values to the current row
          
          for ( int k = 0; k < currentFeatureValues.size() ; k++ ) {
            FeatMat.at<double>(countScene, countFeat) = (double) (currentFeatureValues.at(k));
            countFeat++;
          }
        }
        countScene++; 
      } 

      //*****************************************************************
      // // NORMALIZATION of feature matrix

      cv::Mat FeatMatreduced = FeatMat.colRange(0, 5);     


      if (DEBUG) {cout << "Before normalization " << endl; }
      vector<double> meansVectorCurrentPair = computeMean(FeatMatreduced);
      vector<double> stdVectorCurrentPair = computeStd(FeatMatreduced, meansVectorCurrentPair);
      meanNormalizationPair_currentRef.push_back(meansVectorCurrentPair);
      stdNormalizationPair_currentRef.push_back(stdVectorCurrentPair);

      vector<double> maxVectorCurrentPair = computeMax(FeatMatreduced);  
      vector<double> minVectorCurrentPair = computeMin(FeatMatreduced);
      maxtmp.push_back(maxVectorCurrentPair);
      mintmp.push_back(minVectorCurrentPair);

      // compute a weight based on the STD of the features
      double weight = computeStdWeights(FeatMatreduced, maxVectorCurrentPair, minVectorCurrentPair);

      cv::Mat normalizedFeatMat;
      if (NORMALIZEPAIR == 1) {
        normalizedFeatMat = doNormalization(FeatMatreduced, meansVectorCurrentPair, stdVectorCurrentPair);
      }
      else if (NORMALIZEPAIR == 2) {
        cout << "Before min-max normalization" << endl;
        normalizedFeatMat = doNormalizationMinMax(FeatMatreduced, maxVectorCurrentPair, minVectorCurrentPair);
      }
      else {
        normalizedFeatMat = FeatMatreduced.clone();
      }
      //****************************************************************

      if (DEBUG) {
        cout << endl << endl << "Object : " << i << "and " << j << endl << 
           "The feature matrix dim is " << normalizedFeatMat.size() << endl;
        cout << endl << endl << "The feature matrix N ROWS is " << normalizedFeatMat.rows << endl;
        cout << endl <<  "The features are " << endl <<  normalizedFeatMat << endl;
      }

      //  Training EM model for the current object.  
      cv::EM em_model(nclusters);
      cout << endl << endl << "The feature matrix N ROWS is " << normalizedFeatMat.rows << endl;


      // Constraint: train the GMM model for object-pair features ONLY if the number of samples is sufficient!
      if (FeatMat.rows > 14) {

        if (TESTFLAG) { 
          std::cout << "Training the EM model for: "  << "Objects : " << i << " and " << j << endl << std::endl; 
        }
        em_model.train ( normalizedFeatMat );    
        if (DEBUG) { 
          std::cout << "Getting the parameters of the learned GMM model." << std::endl; 
        }
      }
      else {
          std::cout << "NOT Training the EM model for: "  << "Objects : " << i << " and " << j << endl << std::endl; 
      }
      internalEMvector.push_back(em_model);
    }
    learnedModelPairObject.push_back(internalEMvector); 
    meanNormalizationPair.push_back(meanNormalizationPair_currentRef);
    stdNormalizationPair.push_back(stdNormalizationPair_currentRef);
    minFeatPair.push_back(mintmp);
    maxFeatPair.push_back(maxtmp);
  }
}