Esempio n. 1
0
// Constructs the Brain: moves the face tracker and the speech synthesizer
// onto their own worker threads and wires the Qt signal/slot plumbing
// between them.
// @param webcam shared webcam wrapper; a reference is kept in m_webcam and
//               also handed to the face tracker.
// NOTE(review): neither facedetectorThread nor soundThread is start()ed in
// this constructor — presumably they are started elsewhere; until then the
// queued slots on those objects will never run. Confirm.
Brain::Brain(WebCamWrapper& webcam):m_webcam(webcam),m_faceTracker(m_webcam)
{
    // Face detection must run off the caller's (GUI) thread.
    m_faceTracker.moveToThread(&facedetectorThread);

    // Frames are pushed to the tracker by emitting triggerFaceDetection().
    QObject::connect(this, SIGNAL(triggerFaceDetection(cv::Mat)),
                     &m_faceTracker, SLOT(detectFaces(cv::Mat)));
    // Detection results (face rectangles, labels, face centers) come back
    // to onFacesDetection().
    // NOTE(review): these signature strings mix unqualified `vector<...>`
    // with `std::vector<...>`; string-based connect() requires the strings
    // to match the declared signal/slot signatures exactly, or the connect
    // fails at runtime — verify against the declarations.
    QObject::connect(&m_faceTracker, SIGNAL(faceDetection(vector<cv::Rect_<int> >,vector<string>,std::vector<cv::Point>)),
                     this, SLOT(onFacesDetection(vector<cv::Rect_<int> >,vector<string>,std::vector<cv::Point>)));
    // Speech synthesis likewise runs on its own thread; say() is forwarded
    // to the synthesizer's onSay() slot.
    speech.moveToThread(&soundThread);
    QObject::connect(this, SIGNAL(say(std::string)),&speech, SLOT(onSay(std::string)));
    synthesizeSound();
}
Esempio n. 2
0
// Main processing loop: drives the TLD tracker state machine until ROS
// shuts down, then optionally exports the learned model and releases the
// semaphore held by the caller.
//
// States:
//   INIT         - wait for the first frame and size the detector cascade
//                  to the incoming image geometry.
//   TRACKER_INIT - obtain an initial bounding box (imported model file,
//                  automatic face detection, or a user-supplied BB) and
//                  start the tracker on it.
//   TRACKING     - feed frames to TLD, publish FPS and the tracked object.
//   STOPPED      - idle (1 s sleep per iteration) until re-enabled.
void Main::process()
{
  // Fail fast if auto face detection was requested but the cascade file
  // cannot be loaded.
  if(autoFaceDetection && !face_cascade.load(face_cascade_path))
    {
      ROS_FATAL("--(!)Error loading cascade detector\n");
      return;
    }

  while (ros::ok())
    {
      switch (state)
        {
        case INIT:
          ROS_INFO("INIT");
          if(newImageReceived())
            {
              if(showOutput)
                sendTrackedObject(0, 0, 0, 0, 0.0);
              getLastImageFromBuffer();
              // Size the detector cascade to the grayscale frame geometry.
              tld->detectorCascade->imgWidth = gray.cols;
              tld->detectorCascade->imgHeight = gray.rows;
              tld->detectorCascade->imgWidthStep = gray.step;
              // Detection/learning stay off until a target is selected in
              // TRACKER_INIT.
              tld->detectorEnabled = false;
              tld->learningEnabled = false;
              center_camera_view.x = img.cols/2;
              center_camera_view.y = img.rows/2;

              state = TRACKER_INIT;
            }
          break;
        case TRACKER_INIT:
          ROS_INFO("TRACKER_INIT");

          if(loadModel && !modelImportFile.empty())
            {
              // A pre-trained model was supplied: load it and track without
              // further learning.
              ROS_INFO("Loading model %s", modelImportFile.c_str());

              tld->readFromFile(modelImportFile.c_str());
              tld->learningEnabled = false;
              state = TRACKING;
            }
          else if(autoFaceDetection || correctBB)
            {
              if(autoFaceDetection)
                {
                  // Seed the tracker with the first detected face.
                  target_image = gray;
                  target_bb = faceDetection();
                }

              sendTrackedObject(target_bb.x,target_bb.y,target_bb.width,target_bb.height,1.0);

              ROS_INFO("Starting at %d %d %d %d\n", target_bb.x, target_bb.y, target_bb.width, target_bb.height);

              tld->selectObject(target_image, &target_bb);
              tld->learningEnabled = true;
              state = TRACKING;
            }
          else
            {
              // No bounding box available yet: wait for one from the user.
              ros::Duration(1.0).sleep();
              ROS_INFO("Waiting for a BB");
            }
          break;
        case TRACKING:
          if(newImageReceived())
            {
              ros::Time tic = ros::Time::now();

              getLastImageFromBuffer();
              tld->processImage(img);

              // Publish the per-frame processing rate.
              ros::Duration toc = (ros::Time::now() - tic);
              float fps = 1.0/toc.toSec();

              std_msgs::Float32 msg_fps;
              msg_fps.data = fps;
              pub2.publish(msg_fps);

              if(showOutput)
                {
                  if(tld->currBB != NULL)
                    {
                      // Tracker has a hypothesis: publish it with its
                      // confidence.
                      sendTrackedObject(tld->currBB->x,tld->currBB->y,tld->currBB->width,tld->currBB->height,tld->currConf);
                    }
                  else
                    {
                      // Target lost: publish a degenerate 1x1 box with zero
                      // confidence as the "no detection" sentinel.
                      sendTrackedObject(1, 1, 1, 1, 0.0);
                    }
                }
            }
          break;
        case STOPPED:
          ros::Duration(1.0).sleep();
          ROS_INFO("Tracker stopped");
          break;
        default:
          break;
        }
    }

  // Persist the learned model on shutdown if requested.
  if(exportModelAfterRun)
    {
      tld->writeToFile(modelExportFile.c_str());
    }

  semaphore.unlock();
}