// Predict an in-memory face image with all three recognizers; results are appended to the output vectors.
    void predictImage( cv::Mat &face, std::vector<int> &predict_result, std::vector<double> &predict_confidence, double predicted_confidence = 0.0 )
    {

        //resize image (Eigenfaces/Fisherfaces require the same size as the training images)
        cv::resize(face, face, cv::Size(1000, 1000));

        int predicted_label = -1;

        //push results from all three methods to one vector

        //eigenfaces
        eigenfaceRecognizor->predict(face, predicted_label, predicted_confidence);
        predict_result.push_back(predicted_label);
        predict_confidence.push_back(predicted_confidence);


        //fisherfaces
        fisherfaceRecognizor->predict(face, predicted_label, predicted_confidence);
        predict_result.push_back(predicted_label);
        predict_confidence.push_back(predicted_confidence);



        //LBPH
        LBPHRecognizor->predict(face, predicted_label, predicted_confidence);
        predict_result.push_back(predicted_label);
        predict_confidence.push_back(predicted_confidence);
    }
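
    // Hypothetical helper (not in the original code): one way to combine the three
    // results pushed by predictImage() is a simple majority vote on the labels, falling
    // back to the method with the smallest distance value when all three disagree. The
    // function name and the voting scheme here are assumptions, sketched for illustration.
    int combinePredictions( const std::vector<int> &predict_result, const std::vector<double> &predict_confidence )
    {
        //expects three entries in the order eigenfaces, fisherfaces, LBPH
        if( predict_result.size() < 3 || predict_confidence.size() < 3 )
            return -1;

        int best = 0;
        if( predict_result[0] == predict_result[1] || predict_result[0] == predict_result[2] )
            best = 0;
        else if( predict_result[1] == predict_result[2] )
            best = 1;
        else
        {
            //no two methods agree: fall back to the smallest distance/confidence value
            for( int i = 1; i < 3; ++i )
                if( predict_confidence[i] < predict_confidence[best] )
                    best = i;
        }
        return predict_result[best];
    }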
void TellThatToMyCamera_v1_0App::updateExpressions (Surface cameraImage){
	cv::Mat grayCameraImage( toOcv( cameraImage, CV_8UC1 ) );   // create a grayscale copy of the input image
   	cv::equalizeHist( grayCameraImage, grayCameraImage );       // equalize the histogram for (just a little) more accuracy
    mExpressions.clear();                                       // clear out the previously detected expressions
    mPredictions.clear();
    vector<cv::Rect> expressions;
    
    // Detect the faces and iterate over them, appending each one to mExpressions
    mExpressionsCascade.detectMultiScale(grayCameraImage, expressions);
    
    // At this point the face positions have been calculated.
    // Now it's time to extract each face, make a prediction and save it for the video.
    
    cv::Mat graySq(100,100,CV_8UC1);    // 100x100 grayscale template that defines the size of the resized detected faces
    
    for(vector<cv::Rect>::const_iterator expressionIter = expressions.begin(); expressionIter != expressions.end(); ++expressionIter){
        // Process the detections face by face (in case there's more than one face in the video frame image)
        Rectf expressionRect(fromOcv(*expressionIter));
        mExpressions.push_back(expressionRect);
        
        cv::Rect face_i (*expressionIter);                      // Rect with data (size and position) of the detected face
        cv::Mat face = grayCameraImage(face_i);                 // Image containing the detected face
        cv::Mat face_resized;                                   // Image for the resized version of the detected face
        cv::resize(face, face_resized, graySq.size(), 0, 0, cv::INTER_CUBIC); // resize to 100x100 (fx/fy are ignored when dsize is given)
        // cv::resize(face, face_resized, graySq.size(), 0, 0, cv::INTER_LINEAR);
        
        // Now, perform the EXPRESSION PREDICTION!!!
        int predicted = mFisherFaceRec->predict(face_resized);
        mPredictions.push_back(predicted);                      // put the corresponding label to the corresponding face
    }
}
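
// A minimal sketch (not from the original app) of how the members used above could be
// initialized with the OpenCV 2.4 contrib API. The method name setupRecognition(), the
// cascade file name and the training containers are assumptions; only mExpressionsCascade
// and mFisherFaceRec come from the code above.
void TellThatToMyCamera_v1_0App::setupRecognition(){
    // Load a Haar cascade for face detection
    mExpressionsCascade.load( getAssetPath( "haarcascade_frontalface_alt.xml" ).string() );

    // Train a Fisherfaces model; trainingImages must be filled with 100x100 grayscale
    // (CV_8UC1) expression samples and trainingLabels with one expression label per sample.
    vector<cv::Mat> trainingImages;
    vector<int>     trainingLabels;
    mFisherFaceRec = cv::createFisherFaceRecognizer();
    mFisherFaceRec->train( trainingImages, trainingLabels );
}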
    void predict( std::string &image_path, std::vector<int> &predict_result, std::vector<double> &predict_confidence, double predicted_confidence = 0.0 )
    {

        //faces detected in the image at image_path; this vector must be filled by the
        //face detector before the loop below, otherwise nothing is predicted
        //(see the detection sketch after this function)
        std::vector<cv::Mat> faces_image;

        int predicted_label = -1;

        //make sure there is no previous result
        predict_result.clear();
        predict_confidence.clear();


        for( cv::Mat face : faces_image )
        {

            //convert to grayscale and resize to the size the recognizers expect
            cv::cvtColor(face, face, CV_BGR2GRAY);
            cv::resize(face, face, cv::Size(500, 500));


            //push results from all three methods to one vector

            //eigenfaces
            eigenfaceRecognizor->predict(face, predicted_label, predicted_confidence);
            predict_result.push_back(predicted_label);
            predict_confidence.push_back(predicted_confidence);


            //fisherfaces
            fisherfaceRecognizor->predict(face, predicted_label, predicted_confidence);
            predict_result.push_back(predicted_label);
            predict_confidence.push_back(predicted_confidence);



            //LBPH
            LBPHRecognizor->predict(face, predicted_label, predicted_confidence);
            predict_result.push_back(predicted_label);
            predict_confidence.push_back(predicted_confidence);

        }


        //blur stranger's face
//        detector->blurFacesOrigin(image_path);
    }
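
    // A sketch of a helper (not part of the original class; the cascade file name is a
    // placeholder) showing one way faces_image could be filled from image_path before the
    // loop in predict() runs. The original code presumably relies on its own detector for
    // this step.
    void detectFacesForPrediction( const std::string &image_path, std::vector<cv::Mat> &faces_image )
    {
        cv::Mat whole_image = cv::imread(image_path);
        if( ! whole_image.data )
            return;

        //detect faces on a grayscale copy
        cv::Mat gray;
        cv::cvtColor(whole_image, gray, CV_BGR2GRAY);

        cv::CascadeClassifier face_cascade;
        face_cascade.load("haarcascade_frontalface_alt.xml");

        std::vector<cv::Rect> face_rects;
        face_cascade.detectMultiScale(gray, face_rects, 1.1, 3, 0, cv::Size(30, 30));

        //keep the colour crops: predict() converts them to grayscale itself
        for( size_t i = 0; i < face_rects.size(); ++i )
            faces_image.push_back( whole_image(face_rects[i]).clone() );
    }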
    void predictSingle( const std::string &image_path, std::vector<int> &predict_result, std::vector<double> &predict_confidence, double predicted_confidence = 0.0 )
    {

        //read image
        cv::Mat face = cv::imread(image_path);

        if( ! face.data )
        {
            std::cout << "Cannot find image: " << image_path << std::endl;
            return;
        }

        //convert to grayscale and resize to the size the recognizers expect
        cv::cvtColor(face, face, CV_BGR2GRAY);
        cv::resize(face, face, cv::Size(500, 500));

        int predicted_label = -1;

        //push results from all three methods to one vector

        //eigenfaces
        eigenfaceRecognizor->predict(face, predicted_label, predicted_confidence);
        predict_result.push_back(predicted_label);
        predict_confidence.push_back(predicted_confidence);


        //fisherfaces
        fisherfaceRecognizor->predict(face, predicted_label, predicted_confidence);
        predict_result.push_back(predicted_label);
        predict_confidence.push_back(predicted_confidence);



        //LBPH
        LBPHRecognizor->predict(face, predicted_label, predicted_confidence);
        predict_result.push_back(predicted_label);
        predict_confidence.push_back(predicted_confidence);
    }
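
    //Example usage (hypothetical caller; "recognizer" stands for an instance of this class).
    //The three results come back in the order eigenfaces, fisherfaces, LBPH:
    //
    //    std::vector<int> labels;
    //    std::vector<double> distances;
    //    recognizer.predictSingle("test_face.jpg", labels, distances);
    //    std::cout << "eigenfaces:  " << labels[0] << " (" << distances[0] << ")" << std::endl
    //              << "fisherfaces: " << labels[1] << " (" << distances[1] << ")" << std::endl
    //              << "LBPH:        " << labels[2] << " (" << distances[2] << ")" << std::endl;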
void FaceRecognition::detectAndDraw(cv::Mat image,
                                    cv::CascadeClassifier &cascade,
                                    cv::CascadeClassifier &nested_cascade,
                                    double scale,
                                    bool try_flip)
{
    int    i    = 0;
    double tick = 0.0;
    std::vector<cv::Rect> faces_a, faces_b;
    cv::Scalar colors[] = {
        CV_RGB(0, 0, 255),
        CV_RGB(0, 128, 255),
        CV_RGB(0, 255, 255),
        CV_RGB(0, 255, 0),
        CV_RGB(255, 128, 0),
        CV_RGB(255, 255, 0),
        CV_RGB(255, 0, 0),
        CV_RGB(255, 0, 255)
    };
    cv::Mat image_gray;
    cv::Mat image_small(cvRound(image.rows / scale),
                        cvRound(image.cols / scale),
                        CV_8UC1);
    // Convert to gray image.
    cv::cvtColor(image, image_gray, CV_BGR2GRAY);
    // Downscale the gray image by the given scale factor.
    cv::resize(image_gray, image_small, image_small.size(), 0, 0,
               cv::INTER_LINEAR);
    cv::equalizeHist(image_small, image_small);

    tick = (double)cvGetTickCount();
    cascade.detectMultiScale(image_small, faces_a, 1.1, 2, 0 |
                             CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));

    if (try_flip) {
        cv::flip(image_small, image_small, 1);
        cascade.detectMultiScale(image_small, faces_b, 1.1, 2, 0 |
                                 CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
        std::vector<cv::Rect>::const_iterator it = faces_b.begin();
        for (; it != faces_b.end(); it++) {
            faces_a.push_back(cv::Rect(image_small.cols - it->x - it->width,
                                       it->y, it->width, it->height));
        }
    }

    // Calculate the detection time in milliseconds.
    tick = (double)cvGetTickCount() - tick;
    std::cout << "Detection time: "
              << tick / ((double)cvGetTickFrequency() * 1000.0)
              << " ms"
              << std::endl;

    std::vector<cv::Rect>::const_iterator it = faces_a.begin();
    for (; it != faces_a.end(); it++, i++) {
        int radius;
        double aspect_ratio = (double)it->width / it->height;
        std::vector<cv::Rect> nested_objects;
        cv::Mat small_image_roi;
        cv::Point center;
        cv::Scalar color = colors[i % 8];

        // Capture the detected face and predict its label.
        cv::Mat face_gray;      // renamed from image_gray to avoid shadowing the outer variable
        cv::Mat image_result(cvRound(IMG_HEIGH), cvRound(IMG_WIDTH), CV_8UC1);
        cv::Mat image_temp;
        cv::Rect rect;
        rect.x = cvRound(it->x * scale);
        rect.y = cvRound(it->y * scale);
        rect.height = cvRound(it->height * scale);
        rect.width  = cvRound(it->width  * scale);
        image_temp  = image(rect);
        cv::cvtColor(image_temp, face_gray, CV_BGR2GRAY);
        cv::resize(face_gray, image_result, image_result.size(), 0, 0,
                   cv::INTER_LINEAR);
        int predicted_label = g_model->predict(image_result);

        std::cout << "*************************" << std::endl
                  << "The predicted label: "     << predicted_label
                  << std::endl
                  << "*************************"
                  << std::endl;

        // Recognize specific face for sending character to serial device.
        if (predicted_label == 1) {
            g_face_recognition.writeCharToSerial('Y');
        }
        else {
            g_face_recognition.writeCharToSerial('N');
        }

        // Draw a circle for roughly square faces (aspect ratio close to 1).
        if (0.75 < aspect_ratio && aspect_ratio < 1.3) {
            center.x = cvRound((it->x + it->width * 0.5) * scale);
            center.y = cvRound((it->y + it->height * 0.5) * scale);
            radius = cvRound((it->width + it->height) * 0.25 * scale);
            cv::circle(image, center, radius, color, 3, 8, 0);
        }
        else {
            // Draw the rectangle for faces.
            cv::rectangle(image,
                          cvPoint(cvRound(it->x * scale),
                                  cvRound(it->y * scale)),
                          cvPoint(cvRound((it->x + it->width  - 1) * scale),
                                  cvRound((it->y + it->height - 1) * scale)),
                          color,
                          3,
                          8,
                          0);
            if (nested_cascade.empty()) {
                continue ;
            }
            small_image_roi = image_small(*it);
            nested_cascade.detectMultiScale(small_image_roi, nested_objects,
                                            1.1, 2, 0 | CV_HAAR_SCALE_IMAGE,
                                            cv::Size(30, 30));
            std::vector<cv::Rect>::const_iterator it_temp =
                nested_objects.begin();
            // Draw the circle for eyes.
            for (; it_temp != nested_objects.end(); it_temp++) {
                center.x = cvRound((it->x + it_temp->x + it_temp->width * 0.5)
                    * scale);
                center.y = cvRound((it->y + it_temp->y + it_temp->height * 0.5)
                    * scale);
                radius = cvRound((it_temp->width + it_temp->height) * 0.25
                    * scale);
                cv::circle(image, center, radius, color, 3, 8, 0);
            }
        }
    }
    // Open camera window.
    cv::imshow("Face Recognition", image);
}
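
// A minimal sketch (not part of the original class) of a capture loop that could feed
// camera frames into FaceRecognition::detectAndDraw(). The method name, the camera index
// and the scale / try_flip values are assumptions.
void FaceRecognition::runCameraLoop(cv::CascadeClassifier &cascade,
                                    cv::CascadeClassifier &nested_cascade)
{
    cv::VideoCapture capture(0);            // default camera
    if (!capture.isOpened()) {
        std::cout << "Cannot open camera!" << std::endl;
        return;
    }

    cv::Mat frame;
    for (;;) {
        capture >> frame;
        if (frame.empty()) {
            break;
        }
        // Detect, recognize and draw on a copy of the frame;
        // detectAndDraw() shows the result with cv::imshow() itself.
        detectAndDraw(frame.clone(), cascade, nested_cascade, 1.0, false);
        if (cv::waitKey(10) >= 0) {         // stop on any key press
            break;
        }
    }
}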