/**
 *  @brief          Interactive emotion-detection driver: builds a face
 *                  preprocessor and an emotion detector from the command
 *                  line, then reads image paths from stdin and prints the
 *                  predicted emotion for each.
 *
 *  @param[in]      argc  Argument count; at least 9 arguments are required,
 *                        plus one or more classifier paths.
 *  @param[in]      argv  [1] method ("svm" selects SVM, anything else boost),
 *                        [2] face-detection config, [3] eye-detection config
 *                        or "none", [4] width, [5] height, [6] nwidths,
 *                        [7] nlambdas, [8] nthetas, [9..] classifier paths.
 *
 *  @returns  0 on success; -2 if no classifiers were given; -3 if mandatory
 *            parameters are missing; -e if exception code e was caught.
 */
int main(int argc, const char *argv[]) {
  if (argc < 9) {
    banner();
    help();
    cerr << "ERR: missing parameters" << endl;
    return -3;
  }
  string infile; // filled from stdin in the processing loop below
  string method(argv[1]);
  string config(argv[2]);
  string config_e(argv[3]);

  cv::Size size(0, 0);
  int nwidths, nlambdas, nthetas;
  // abs() guards against accidentally negative numeric arguments.
  size.width = abs(atoi(argv[4]));
  size.height = abs(atoi(argv[5]));
  nwidths = abs(atoi(argv[6]));
  nlambdas = abs(atoi(argv[7]));
  nthetas = abs(atoi(argv[8]));
  vector<string> classifier_paths;

  if (argc >= 10) {
    // Every remaining argument is a boost/SVM classifier model path.
    for (int i = 9; i < argc; i++) {
      classifier_paths.push_back(string(argv[i]));
    }
  } else {
    cerr << "ERR: you must specify some classifiers" << endl;
    return -2;
  }

  // Initialized to NULL so the cleanup in the catch block is safe even when
  // an exception is thrown before (or between) the two allocations.
  FacePreProcessor* preprocessor = NULL;
  EmoDetector* emodetector = NULL;

  try {

    if (config_e == "none") {
      // No eye-detection config: build the preprocessor without it.
      preprocessor = new FacePreProcessor(config, size.width, size.height,
          nwidths, nlambdas, nthetas);
    } else {
      double t1 = timestamp();
      preprocessor = new FacePreProcessor(config, config_e, size.width,
          size.height, nwidths, nlambdas, nthetas);
      cout << " FacePreProcessor: " << timestamp() - t1 << " ";
    }

    if (method == "svm") {
      emodetector = new SVMEmoDetector(kCfactor, kMaxIteration, kErrorMargin);
    } else {
      emodetector = new BoostEmoDetector(kBoostType, kTrimWeight, kMaxDepth);
    }

    emodetector->init(classifier_paths);

    // Interactive loop: one image path per line until EOF.
    cout << "Insert the image file path: " << endl;
    while (std::getline(std::cin, infile)) {
      try {
        cout << "Processing '" << infile << "'" << endl;
        Mat img = matrix_io_load(infile);
        Mat features;
        bool canPreprocess = preprocessor->preprocess(img, features);
        if (!canPreprocess) {
          cerr << "ERR: Cannot preprocess this image '" << infile << "'" << endl;
          continue;
        }
        pair<Emotion,float> prediction = emodetector->predict(features);
        cout << "Emotion predicted: " << emotionStrings(prediction.first) << " with score " << prediction.second << endl;
        cout << "Insert the image file path: " << endl;
      } catch (int ee) {
        // Per-image failure: report and keep serving the next path.
        cerr << "ERR: Something wrong with '" << infile << "' (" << ee << ")" << endl;
      }
    }

    delete emodetector;
    delete preprocessor;
  } catch (int e) {
    cerr << "ERR: Exception #" << e << endl;
    // Release whatever was allocated before the exception (was leaked before).
    delete emodetector;
    delete preprocessor;
    return -e;
  }

  return 0;
}
// ---------------------------------------------------------------------------
// Example #2
// ---------------------------------------------------------------------------
  /// Per-frame callback: converts the incoming ROS image to grayscale,
  /// classifies the emotion of each tracked face ROI, and republishes the
  /// face list annotated with an emotion label and its score.
  void imageCb(const sensor_msgs::ImageConstPtr& msg)
  {
    // Convert the ROS image message into an OpenCV BGR8 image we own.
    cv_bridge::CvImagePtr cv_ptr;
    try {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    } catch (cv_bridge::Exception& e) {
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }

    Mat frame = cv_ptr->image;
    cv::cvtColor(frame, frame, cv::COLOR_BGR2GRAY);

    Mat features;
    for (size_t idx = 0; idx < face_locs.objects.size(); ++idx) {
      // Crop the face region reported by the tracker; clone() so the ROI
      // owns its own pixel buffer.
      const cv::Rect region(face_locs.objects[idx].object.x_offset,
                            face_locs.objects[idx].object.y_offset,
                            face_locs.objects[idx].object.width,
                            face_locs.objects[idx].object.height);
      Mat face_roi = frame(region).clone();

      if (preprocessor->preprocess(face_roi, features)) {
        pair<Emotion, float> result = emodetector->predict(features);
        emotionString = emotionStrings(result.first);
        emotionAccuracy = result.second;
      }
      // NOTE(review): when preprocessing fails, emotionString/emotionAccuracy
      // keep their previous values, so this face is published with the last
      // successful label — confirm this carry-over is intended.

      cmt_tracker_msgs::Object annotated = face_locs.objects[idx];
      annotated.obj_states.data = emotionString;
      annotated.obj_accuracy.data = emotionAccuracy;
      emot_pub_faces.objects.push_back(annotated);
    }

    // Publish the annotated batch, then reset both lists for the next frame.
    faces_locations.publish(emot_pub_faces);
    face_locs.objects.clear();
    emot_pub_faces.objects.clear();
  }