Example #1
//==============================================================================
vector<uchar> ANN::predict(Mat_<float> &testData)
{
    int numberOfSamples = testData.rows;

    Mat_<float> classifResult(1, this->numberOfClasses);
    vector<uchar> predictedLabels(numberOfSamples);

    for(int i = 0; i < numberOfSamples; i++) {

        nnetwork->predict(testData.row(i), classifResult);
#if DEBUG
        cout << classifResult << endl;
#endif
        // Find the column with the maximum network response; its index is the
        // predicted class label.
        Point2i max_loc;
        minMaxLoc(classifResult, nullptr, nullptr, nullptr, &max_loc);

        // Append the raw network responses for this sample.
        predictions.push_back(classifResult);
        predictedLabels[i] = static_cast<unsigned char>(max_loc.x);
    }
    // Append this batch to the accumulated predictLabels member and return
    // everything predicted so far (not just this batch).
    predictLabels.insert(predictLabels.end(), predictedLabels.begin(), predictedLabels.end());

    return predictLabels;
}
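
A minimal usage sketch for the function above, assuming the ANN object has already been trained. The function name runPredictSketch, the 2x10 feature batch, and the random values are hypothetical, for illustration only; note that predict() returns every label accumulated so far across calls, not just this batch's.

// Hypothetical usage sketch; 'ann' is assumed to be an already-trained ANN.
#include <opencv2/core.hpp>
#include <vector>
#include <iostream>

void runPredictSketch(ANN& ann)
{
    // Two samples with 10 features each (dimensions are made up).
    cv::Mat_<float> testData(2, 10);
    cv::randu(testData, cv::Scalar(0.0), cv::Scalar(1.0));

    std::vector<uchar> labels = ann.predict(testData);
    for (size_t i = 0; i < labels.size(); ++i)
        std::cout << "sample " << i << " -> class " << int(labels[i]) << "\n";
}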
Example #2
//==============================================================================
vector<uchar> ANN::predictMouth(Mat_<float> &testData)
{
    int numberOfSamples = testData.rows;

    Mat_<float> classifResult(1, (int)MOUTH_CLASSES);
    vector<uchar> predictedLabels(numberOfSamples);

    for(int i = 0; i < numberOfSamples; i++) {

        nnetwork->predict(testData.row(i), classifResult);

        // Find the column with the maximum network response.
        Point2i max_loc;
        minMaxLoc(classifResult, nullptr, nullptr, nullptr, &max_loc);

        // Append the raw network responses for this sample.
        predictions.push_back(classifResult);

        // Shift the argmax index by 2; the mouth class labels presumably
        // start at 2 rather than 0.
        predictedLabels[i] = static_cast<unsigned char>(max_loc.x) + 2;
    }
    // Append this batch to the accumulated predictLabels member, but (unlike
    // predict() above) return only this batch's labels.
    predictLabels.insert(predictLabels.end(), predictedLabels.begin(), predictedLabels.end());

    return predictedLabels;
}
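
The two functions above differ only in the response width and a constant label offset (+2 in predictMouth). As a design note, they could share one helper along these lines; predictWithOffset is a hypothetical name, not part of the original class, and nnetwork is assumed to be an OpenCV ANN_MLP-style model.

// Hypothetical shared helper: argmax over the network response per sample,
// plus an optional constant label offset.
#include <opencv2/core.hpp>
#include <opencv2/ml.hpp>
#include <vector>

static std::vector<uchar> predictWithOffset(cv::Ptr<cv::ml::ANN_MLP> net,
                                            cv::Mat_<float>& testData,
                                            int numClasses, uchar labelOffset)
{
    cv::Mat_<float> response(1, numClasses);
    std::vector<uchar> labels(testData.rows);

    for (int i = 0; i < testData.rows; ++i) {
        net->predict(testData.row(i), response);
        cv::Point2i maxLoc;
        cv::minMaxLoc(response, nullptr, nullptr, nullptr, &maxLoc);
        labels[i] = static_cast<uchar>(maxLoc.x) + labelOffset;
    }
    return labels;
}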
Example #3
void AdaBoost<MatType, WeakLearner>::Train(
    const MatType& data,
    const arma::Row<size_t>& labels,
    const WeakLearner& other,
    const size_t iterations,
    const double tolerance)
{
  // Clear information from previous runs.
  wl.clear();
  alpha.clear();

  // Count the number of classes.
  classes = (arma::max(labels) - arma::min(labels)) + 1;
  this->tolerance = tolerance;

  // crt is the cumulative rt value for terminating the optimization when rt is
  // changing by less than the tolerance.
  double rt, crt = 0.0, alphat = 0.0, zt;

  ztProduct = 1.0;

  // To be used for prediction by the weak learner.
  arma::Row<size_t> predictedLabels(labels.n_cols);

  // Use tempData to modify input data for incorporating weights.
  MatType tempData(data);

  // This matrix is a helper matrix used to calculate the final hypothesis.
  arma::mat sumFinalH = arma::zeros<arma::mat>(classes, predictedLabels.n_cols);

  // Load the initial weights into a 2-D matrix.
  const double initWeight = 1.0 / double(data.n_cols * classes);
  arma::mat D(classes, data.n_cols);
  D.fill(initWeight);

  // Weights are stored in this row vector.
  arma::rowvec weights(predictedLabels.n_cols);

  // This is the final hypothesis.
  arma::Row<size_t> finalH(predictedLabels.n_cols);

  // Now, start the boosting rounds.
  for (size_t i = 0; i < iterations; i++)
  {
    // Initialized to zero in every round.  rt is used for calculation of
    // alphat; it is the weighted error.
    // rt = (sum) D(i) y(i) ht(xi)
    rt = 0.0;

    // zt is used for weight normalization.
    zt = 0.0;

    // Build the weight vectors.
    weights = arma::sum(D);

    // Use the existing weak learner to train a new one with new weights.
    WeakLearner w(other, tempData, labels, weights);
    w.Classify(tempData, predictedLabels);

    // Now, calculate rt from predictedLabels: rt is the total weight of the
    // correctly classified instances minus the total weight of the
    // misclassified ones.
    for (size_t j = 0; j < D.n_cols; j++)
    {
      if (predictedLabels(j) == labels(j))
        rt += arma::accu(D.col(j));
      else
        rt -= arma::accu(D.col(j));
    }

    if ((i > 0) && (std::abs(rt - crt) < tolerance))
      break;

    crt = rt;

    // Our goal is to find alphat which minimizes, or approximately minimizes,
    // the value of Z as a function of alpha.
    alphat = 0.5 * log((1 + rt) / (1 - rt));

    alpha.push_back(alphat);
    wl.push_back(w);

    // Now start modifying the weights.  The scaling factor exp(alphat) is the
    // same for every instance, so compute it once outside the loop.
    const double expo = exp(alphat);
    for (size_t j = 0; j < D.n_cols; j++)
    {
      if (predictedLabels(j) == labels(j))
      {
        for (size_t k = 0; k < D.n_rows; k++)
        {
          // Scale the weight down for a correctly classified instance and
          // accumulate zt, the normalization constant.
          D(k, j) /= expo;
          zt += D(k, j);

          // Add to the final hypothesis matrix: the true class row gets
          // +alphat, every other row gets -alphat.
          if (k == labels(j))
            sumFinalH(k, j) += alphat;
          else
            sumFinalH(k, j) -= alphat;
        }
      }
      else
      {
        for (size_t k = 0; k < D.n_rows; k++)
        {
          // Scale the weight up for a misclassified instance and accumulate
          // zt, the normalization constant.
          D(k, j) *= expo;
          zt += D(k, j);

          // Add to the final hypothesis matrix, as above.
          if (k == labels(j))
            sumFinalH(k, j) += alphat;
          else
            sumFinalH(k, j) -= alphat;
        }
      }
    }

    // Normalize D.
    D /= zt;

    // Accumulate the value of zt for the Hamming loss bound.
    ztProduct *= zt;
  }

  // Iterations are over, now build a strong hypothesis from a weighted
  // combination of these weak hypotheses.
  arma::colvec tempSumFinalH;
  arma::uword maxIndex;

  for (size_t i = 0; i < sumFinalH.n_cols; i++)
  {
    tempSumFinalH = sumFinalH.unsafe_col(i);
    tempSumFinalH.max(maxIndex);
    finalH(i) = maxIndex;
  }

  finalHypothesis = finalH;
}
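
To make one boosting round concrete, here is a small self-contained sketch of the arithmetic above, collapsing the per-class rows of D into a single weight per instance; the three instances and their outcomes are made up for illustration.

#include <cmath>
#include <iostream>

int main()
{
  // Three instances with equal initial weight; suppose the weak learner
  // classifies instances 0 and 1 correctly and instance 2 incorrectly.
  double D[3] = { 1.0 / 3, 1.0 / 3, 1.0 / 3 };
  bool correct[3] = { true, true, false };

  // rt: weight of correct instances minus weight of incorrect ones.
  double rt = 0.0;
  for (int j = 0; j < 3; ++j)
    rt += correct[j] ? D[j] : -D[j];              // rt = 1/3

  // alphat = 0.5 * log((1 + rt) / (1 - rt)) = 0.5 * log(2) ~= 0.3466.
  const double alphat = 0.5 * std::log((1 + rt) / (1 - rt));
  const double expo = std::exp(alphat);           // sqrt(2) ~= 1.4142

  // Correct instances shrink by expo, incorrect ones grow; zt renormalizes.
  double zt = 0.0;
  for (int j = 0; j < 3; ++j)
  {
    D[j] = correct[j] ? D[j] / expo : D[j] * expo;
    zt += D[j];
  }
  for (int j = 0; j < 3; ++j)
    D[j] /= zt;                                   // D becomes {0.25, 0.25, 0.5}

  std::cout << "alphat = " << alphat << ", zt = " << zt << "\n";
  return 0;
}

The misclassified instance ends up with twice the weight of each correct one, which is exactly what pushes the next weak learner to concentrate on it.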
Example #4
Adaboost<MatType, WeakLearner>::Adaboost(const MatType& data,
                                         const arma::Row<size_t>& labels,
                                         int iterations,
                                         size_t classes,
                                         const WeakLearner& other)
{
  // NOTE: add a fail-safe check for the 'classes' parameter, or remove it
  // entirely by counting the unique labels instead.
  size_t i, j, k;
  double rt, alphat = 0.0, zt;
  
  // To be used for prediction by the weak learner.
  arma::Row<size_t> predictedLabels(labels.n_cols);
  
  // Use tempData to modify the input data to incorporate weights.
  MatType tempData(data);
  
  // Build the classification matrix yt from the labels.
  arma::mat yt(predictedLabels.n_cols, classes);
  
  // Build a classification matrix of the form D(i,l)
  // where i is the ith instance
  // l is the lth class.
  buildClassificationMatrix(yt, labels);
  
  // ht(x), to be loaded after a round of prediction every time the weak
  // learner is run, by using the buildClassificationMatrix function
  arma::mat ht(predictedLabels.n_cols, classes);

  // This matrix is a helper matrix used to calculate the final hypothesis.
  arma::mat sumFinalH(predictedLabels.n_cols, classes);
  sumFinalH.fill(0.0);
  
  // Load the initial weights into a 2-D matrix.
  const double initWeight = 1.0 / (data.n_cols * classes);
  arma::mat D(data.n_cols, classes);
  D.fill(initWeight);
  // D.print("The value of D after initialization.");
  
  // Weights are to be compressed into this row vector for passing to the
  // weak learner (e.g., the perceptron).
  arma::rowvec weights(predictedLabels.n_cols);
  // weights.print("This is the value of weight just after initialization.");
  // This is the final hypothesis.
  arma::rowvec finalH(predictedLabels.n_cols);

  // Now start the boosting rounds.
  for (i = 0; i < iterations; i++)
  {
    std::cout<<"Run "<<i<<" times !\n";
    // Initialized to zero in every round.
    rt = 0.0; 
    zt = 0.0;
    
    // Build the weight vectors
    // D.print("The value of D in each iteration, just before training.");
    buildWeightMatrix(D, weights);
    // D.print("This is the value of D, before sending off to modify data");
    // call the other weak learner and train the labels.
    WeakLearner w(other, tempData, weights, labels);
    w.Classify(tempData, predictedLabels);

    // Now, from predictedLabels, build ht, the weak hypothesis.
    buildClassificationMatrix(ht, predictedLabels);

    // Now calculate alpha(t) using ht; begin with rt = sum over j, k of
    // D(j,k) * yt(j,k) * ht(j,k).
    for (j = 0; j < ht.n_rows; j++)
    {
      for (k = 0; k < ht.n_cols; k++)
        rt += (D(j, k) * yt(j, k) * ht(j, k));
    }

    alphat = 0.5 * log((1 + rt) / (1 - rt));

    // Now start modifying the weights.

    for (j = 0; j < D.n_rows; j++)
    {
      for (k = 0; k < D.n_cols; k++)
      {
        // Update the weight and accumulate zt, the normalization constant.
        D(j, k) *= exp(-1 * alphat * yt(j, k) * ht(j, k));
        zt += D(j, k);

        // Add to the matrix of the final hypothesis.
        sumFinalH(j, k) += (alphat * ht(j, k));
      }
    }

    // Normalize D.
    D /= zt;
  }

  // Iterations are over, now build a strong hypothesis
  // from a weighted combination of these weak hypotheses.
  
  // This step of storing it in a temporary row vector could probably be
  // improved upon.
  arma::rowvec tempSumFinalH;
  arma::uword max_index;
  for (i = 0; i < sumFinalH.n_rows; i++)
  {
    tempSumFinalH = sumFinalH.row(i);
    tempSumFinalH.max(max_index);
    finalH(i) = max_index;
  }
  // labels.print("These are the labels.");
  finalH.print("This is the final hypothesis.");
  int counterror = 0;
  for (i = 0; i < labels.n_cols; i++)
  {
    if (labels(i) != finalH(i))
    {
      std::cout << "Prediction " << i << " is incorrect!\n";
      counterror++;
    }
  }
  std::cout << "There are " << counterror << " misclassified records.\n";
  std::cout << "The error rate is: "
            << (double) counterror / labels.n_cols << "\n";
  // finalH is the final hypothesis.
}
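
Example #4 calls two helpers, buildClassificationMatrix and buildWeightMatrix, that are not shown. From their usage (the product yt(j,k) * ht(j,k) inside exp, and D being compressed to one weight per instance), a plausible reading is that the former encodes labels as +1/-1 and the latter sums each instance's row of D; the sketches below are reconstructions under that assumption, not the original helpers.

#include <armadillo>

// Assumed reconstruction: +1 in the instance's own class column, -1
// elsewhere, so that yt(j, k) * ht(j, k) is +1 on agreement and -1 on
// disagreement in the weight update.
void buildClassificationMatrix(arma::mat& mat, const arma::Row<size_t>& labels)
{
  mat.fill(-1.0);
  for (size_t i = 0; i < labels.n_cols; i++)
    mat(i, labels(i)) = 1.0;
}

// Assumed reconstruction: compress the (instances x classes) matrix D into
// one weight per instance by summing across the class columns, mirroring
// 'weights = arma::sum(D)' in the Train() version of Example #3.
void buildWeightMatrix(const arma::mat& D, arma::rowvec& weights)
{
  for (size_t i = 0; i < D.n_rows; i++)
    weights(i) = arma::accu(D.row(i));
}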