Example #1
0
/**
 *  @brief          Entry point of the Emotime GUI demo.
 *
 *  @param[in]      argc    Argument count
 *  @param[in]      argv    Argument values
 *
 *  @returns  0 on success, -3 if parameters are missing, -e if exception #e
 *            is thrown while running the GUI.
 *
 */
int main(int argc, const char* argv[]) {
  if (argc < 5) {
		banner();
		help();
		cerr << "ERR: missing parameters" << endl;
		return -3;
	}

  // Intializing the face detector
	string faceDetConfig(argv[1]);
	string eyeDetConfig(argv[2]);
  int width = std::atoi(argv[3]);
  int height = std::atoi(argv[4]);
  int nwidths = std::atoi(argv[5]);
  int nlambdas = std::atoi(argv[6]);
  int nthetas = std::atoi(argv[7]);
  FacePreProcessor facepreproc(faceDetConfig, eyeDetConfig, width, height, nwidths, nlambdas, nthetas);

  // Setting the mode
  int i;
	string mode;
  mode = string(argv[8]);

	if (mode != "svm" && mode != "ada") {
    mode = "ada";
    i = 8;
  } else {
    i = 9;
  }

  // Setting the classifiers
  vector<string> cl_paths;
  EmoDetector* emodetector;

  for(; i < argc; i++) {
    cl_paths.push_back(string(argv[i]));
  }

  if (mode == "svm") {
    emodetector = new SVMEmoDetector(kCfactor, kMaxIteration, kErrorMargin);
  } else {
    emodetector = new BoostEmoDetector(kBoostType, kTrimWeight, kMaxDepth);
  }
  emodetector->init(cl_paths);

  // Creating and starting the EmotimeGUI
  int fps = 120;
	try {
    EmotimeGui gui(&facepreproc, emodetector, fps);
    gui.run();
	} catch (int e) {
		cerr << "ERR: Exception #" << e << endl;
		return -e;
	}

  delete emodetector;
  return 0;
}
Example #2
0
  /**
   *  @brief  Wires up the ROS plumbing and builds the Emotime pipeline.
   *
   *  Reads the camera topic and filtered-face-locations topic names from the
   *  parameter server, subscribes to both, advertises the annotated-faces
   *  topic, then constructs a FacePreProcessor (Haar cascades + Gabor bank
   *  parameters) and an SVM emotion detector initialized with the
   *  one-vs-rest classifier files shipped in the "emotime" ROS package.
   */
  EmoTimeWrap()
    : it_(nh_)
  {

// Subscribe to the input video feed and to the filtered face locations;
// the annotated results are published on "emo_pub_registered" below.
    counter = 0;
    nh_.getParam("camera_topic", subscribe_topic);
    nh_.getParam("filtered_face_locations", filtered_face_locations);
    image_sub_ = it_.subscribe(subscribe_topic, 1,&EmoTimeWrap::imageCb, this);
    face_location_sub = nh_.subscribe(filtered_face_locations, 1, &EmoTimeWrap::list_of_faces_update, this);
    //image_pub_ = it_.advertise("/emotime_node/output_video", 1);
    //emotion_pub = nh_.advertise<std_msgs::String>("emotion_states", 1000);
    faces_locations = nh_.advertise<cmt_tracker_msgs::Objects>("emo_pub_registered", 10);
    method = "svm";
    // config = "/home/lina/Desktop/emotime_final/emotime/resources/haarcascade_frontalface_cbcl1.xml";
    // Hard-coded system-wide OpenCV cascade paths for face and eye detection.
    config = "/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml";
    config_e = "/usr/share/opencv/haarcascades/haarcascade_eye.xml";
    // Gabor feature-extraction parameters: 52x52 face crop, 1 width,
    // 5 lambdas, 8 orientations.
    size.width = 52;
    size.height = 52;
    nwidths = 1;
    nlambdas = 5;
    nthetas = 8;

    // One one-vs-rest SVM per emotion, resolved relative to the package root.
    path= ros::package::getPath("emotime");
    classifier_paths.push_back(path + "/svm/anger_vs_contempt_disgust_fear_happy_neutral_sadness_surprise_feats.xml");
    classifier_paths.push_back(path + "/svm/contempt_vs_anger_disgust_fear_happy_neutral_sadness_surprise_feats.xml");
    classifier_paths.push_back(path + "/svm/disgust_vs_anger_contempt_fear_happy_neutral_sadness_surprise_feats.xml");
    classifier_paths.push_back(path + "/svm/fear_vs_anger_contempt_disgust_happy_neutral_sadness_surprise_feats.xml");
    classifier_paths.push_back(path + "/svm/happy_vs_anger_contempt_disgust_fear_neutral_sadness_surprise_feats.xml");
    classifier_paths.push_back(path + "/svm/neutral_vs_anger_contempt_disgust_fear_happy_sadness_surprise_feats.xml");
    classifier_paths.push_back(path + "/svm/sadness_vs_anger_contempt_disgust_fear_happy_neutral_surprise_feats.xml");
    classifier_paths.push_back(path + "/svm/surprise_vs_anger_contempt_disgust_fear_happy_neutral_sadness_feats.xml");

    //cout << "Length of the classifiers: " << classifier_paths.size() << std::endl;
    preprocessor = new FacePreProcessor(config, config_e, size.width,
                                        size.height, nwidths, nlambdas, nthetas);

    emodetector = new SVMEmoDetector(kCfactor, kMaxIteration, kErrorMargin);
    emodetector->init(classifier_paths);
    //text = "Funny text inside the box";
    // Text-overlay settings for the (currently commented-out) putText call.
    fontFace = FONT_HERSHEY_SCRIPT_SIMPLEX;
    fontScale = 2;
    thickness = 3;
    textOrg.x = 10;
    textOrg.y = 130;
    // NOTE(review): these two are ctor-local and discarded on exit; if
    // lineType/bottomLeftOrigin are also class members (as the putText usage
    // elsewhere suggests), the members stay uninitialized — confirm and drop
    // the local declarations.
    int lineType = 8;
    bool bottomLeftOrigin = false;
    //cv::namedWindow(OPENCV_WINDOW);
  }
/**
 *  @brief          Entry point of the interactive emotion-detection CLI.
 *
 *  @param[in]      argc    Argument count
 *  @param[in]      argv    Argument values
 *
 *  @returns  0 on success, -3 if parameters are missing, -2 if no classifier
 *            paths were given, -e if exception #e is thrown during setup.
 *
 */
int main(int argc, const char *argv[]) {
  // argv layout: [1]=method ("svm" or boosting) [2]=face cascade
  // [3]=eye cascade ("none" skips eye alignment) [4]=width [5]=height
  // [6]=nwidths [7]=nlambdas [8]=nthetas [9..]=classifier files.
  if (argc < 9) {
    banner();
    help();
    cerr << "ERR: missing parameters" << endl;
    return -3;
  }
  string infile; // filled from stdin in the processing loop below
  string method(argv[1]);
  string config(argv[2]);
  string config_e(argv[3]);

  cv::Size size(0,0);
  int nwidths, nlambdas, nthetas;
  size.width = abs(atoi(argv[4]));
  size.height = abs(atoi(argv[5]));
  nwidths = abs(atoi(argv[6]));
  nlambdas= abs(atoi(argv[7]));
  nthetas = abs(atoi(argv[8]));

  // Every remaining argument is a trained-classifier file path.
  vector<string> classifier_paths;
  if (argc >= 10) {
    for (int i = 9; i < argc; i++) {
      classifier_paths.push_back(string(argv[i]));
    }
  } else {
    cerr << "ERR: you must specify some classifiers" << endl;
    return -2;
  }

  // Initialize to NULL so the catch block can safely delete whichever
  // objects were actually constructed before an exception was thrown
  // (previously both leaked on the error return path).
  FacePreProcessor* preprocessor = NULL;
  EmoDetector* emodetector = NULL;

  try {

    if (config_e == "none") {
      // No eye cascade: face detection only, without eye-based alignment.
      preprocessor = new FacePreProcessor(config, size.width, size.height,
          nwidths, nlambdas, nthetas);
    } else {
      double t1 = timestamp();
      preprocessor = new FacePreProcessor(config, config_e, size.width,
          size.height, nwidths, nlambdas, nthetas);
      cout << " FacePreProcessor: " << timestamp() - t1 << " ";
    }

    if (method == "svm") {
      emodetector = new SVMEmoDetector(kCfactor, kMaxIteration, kErrorMargin);
    } else {
      emodetector = new BoostEmoDetector(kBoostType, kTrimWeight, kMaxDepth);
    }

    emodetector->init(classifier_paths);

    // Interactive loop: one image path per stdin line, predict its emotion.
    cout << "Insert the image file path: " << endl;
    while(std::getline(std::cin, infile)) {
      try {
        cout << "Processing '" << infile << "'" << endl;
        Mat img = matrix_io_load(infile);
        Mat features;
        bool canPreprocess = preprocessor->preprocess(img, features);
        if (!canPreprocess) {
          cerr << "ERR: Cannot preprocess this image '" << infile << "'" << endl;
          continue;
        }
        pair<Emotion,float> prediction = emodetector->predict(features);
        cout << "Emotion predicted: " << emotionStrings(prediction.first) << " with score " << prediction.second << endl;
        cout << "Insert the image file path: " << endl;
      } catch (int ee) {
        // Per-image failure: report it and keep serving further paths.
        cerr << "ERR: Something wrong with '" << infile << "' (" << ee << ")" << endl;
      }
    }

    delete emodetector;
    delete preprocessor;
  } catch (int e) {
    cerr << "ERR: Exception #" << e << endl;
    // Release whatever was allocated before bailing out.
    delete emodetector;
    delete preprocessor;
    return -e;
  }

  return 0;
}
Example #4
0
  /**
   *  @brief  ROS image callback: classify the emotion of every tracked face.
   *
   *  Converts the incoming frame to grayscale, crops each face rectangle
   *  previously received in face_locs, runs the Gabor preprocessing and the
   *  emotion detector on each crop, and republishes every face annotated
   *  with the predicted emotion label and score.
   *
   *  @param[in] msg  A camera frame from the subscribed topic (decoded BGR8).
   */
  void imageCb(const sensor_msgs::ImageConstPtr& msg)
  {

    cv_bridge::CvImagePtr cv_ptr;
    try
    {
      cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
      // Unconvertible frame: log and skip it.
      ROS_ERROR("cv_bridge exception: %s", e.what());
      return;
    }

    // The preprocessor works on a single-channel image.
    Mat img = cv_ptr->image;
    Mat features;
    cv::cvtColor(img, img, cv::COLOR_BGR2GRAY);
    // NOTE(review): int vs. objects.size() is a signed/unsigned comparison;
    // harmless for realistic face counts but size_t would be cleaner.
    for (int i = 0; i < face_locs.objects.size(); i++)
    {
      // Crop the face ROI; clone() detaches it from the full frame.
      // NOTE(review): assumes tracker rectangles lie inside the frame —
      // an out-of-bounds cv::Rect would throw. Confirm upstream clipping.
      Mat roi = img(cv::Rect(face_locs.objects[i].object.x_offset, face_locs.objects[i].object.y_offset,
                              face_locs.objects[i].object.width, face_locs.objects[i].object.height)).clone();


      bool canPreprocess = preprocessor->preprocess(roi, features);
      if (!canPreprocess) {
        // NOTE(review): on failure, emotionString/emotionAccuracy keep the
        // values from the previous face (or previous frame) yet are still
        // published below — confirm whether a sentinel value is wanted.
       // cout << " Can't Process" << endl;
      }
      else {
        pair<Emotion, float> prediction = emodetector->predict(features);
        emotionString = emotionStrings(prediction.first);
        emotionAccuracy = prediction.second;
        //emotion_msg.data = emotionStrings(prediction.first);
       // ROS_INFO("%s", emotion_msg.data.c_str());

      }
      //cv::putText(roi, emotionString, textOrg, fontFace, fontScale, CV_RGB(255, 255, 0), thickness, lineType);
      // Output modified video stream


//      std::string s = SSTR( i );

      //cv::imshow(s, roi);
      //cv::waitKey(3);
      //emotion_pub.publish(emotion_msg);
      //image_pub_.publish(cv_ptr->toImageMsg());

      // Copy the tracker's face entry and attach the emotion annotation.
      cmt_tracker_msgs::Object face_description;
      face_description = face_locs.objects[i];


      face_description.obj_states.data = emotionString;
      face_description.obj_accuracy.data = emotionAccuracy;
      emot_pub_faces.objects.push_back(face_description);

    }
    // Publish the annotated batch, then reset both buffers for the next frame.
    faces_locations.publish(emot_pub_faces);
    face_locs.objects.clear();
    emot_pub_faces.objects.clear();




  }