virtual void onInit()
  {
    nh_ = getNodeHandle();
    it_ = boost::shared_ptr<image_transport::ImageTransport>(new image_transport::ImageTransport(nh_));
    local_nh_ = ros::NodeHandle("~");

    local_nh_.param("debug_view", debug_view_, false);
    subscriber_count_ = 0;
    prev_stamp_ = ros::Time(0, 0);

    image_transport::SubscriberStatusCallback img_connect_cb    = boost::bind(&FaceDetectionNodelet::img_connectCb, this, _1);
    image_transport::SubscriberStatusCallback img_disconnect_cb = boost::bind(&FaceDetectionNodelet::img_disconnectCb, this, _1);
    ros::SubscriberStatusCallback msg_connect_cb    = boost::bind(&FaceDetectionNodelet::msg_connectCb, this, _1);
    ros::SubscriberStatusCallback msg_disconnect_cb = boost::bind(&FaceDetectionNodelet::msg_disconnectCb, this, _1);
    img_pub_ = image_transport::ImageTransport(local_nh_).advertise("image", 1, img_connect_cb, img_disconnect_cb);
    msg_pub_ = local_nh_.advertise<opencv_apps::FaceArrayStamped>("faces", 1, msg_connect_cb, msg_disconnect_cb);
        
    if( debug_view_ ) {
      subscriber_count_++;
    }
    std::string face_cascade_name, eyes_cascade_name;
    local_nh_.param("face_cascade_name", face_cascade_name, std::string("/usr/share/opencv/haarcascades/haarcascade_frontalface_alt.xml"));
    local_nh_.param("eyes_cascade_name", eyes_cascade_name, std::string("/usr/share/opencv/haarcascades/haarcascade_eye_tree_eyeglasses.xml"));
        
    if( !face_cascade_.load( face_cascade_name ) ){ NODELET_ERROR("--Error loading %s", face_cascade_name.c_str()); };
    if( !eyes_cascade_.load( eyes_cascade_name ) ){ NODELET_ERROR("--Error loading %s", eyes_cascade_name.c_str()); };

    dynamic_reconfigure::Server<face_detection::FaceDetectionConfig>::CallbackType f =
      boost::bind(&FaceDetectionNodelet::reconfigureCallback, this, _1, _2);
    srv.setCallback(f);
  }
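The connect/disconnect callbacks bound above are not shown on this page. A minimal sketch of the usual lazy-subscription pattern they implement, assuming a member subscriber img_sub_ and an imageCallback defined elsewhere in the nodelet (both names are assumptions, not the actual opencv_apps source):

  // Subscribe to the camera only while at least one client listens.
  void img_connectCb(const image_transport::SingleSubscriberPublisher&)
  {
    if (subscriber_count_++ == 0) {
      img_sub_ = it_->subscribe("image", 3, &FaceDetectionNodelet::imageCallback, this);
    }
  }

  // Drop the camera subscription once the last client disconnects.
  void img_disconnectCb(const image_transport::SingleSubscriberPublisher&)
  {
    if (--subscriber_count_ == 0) {
      img_sub_.shutdown();
    }
  }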
Example #2
void ocvFaceDetectApp::setup()
{
	mFaceCascade.load( getAssetPath( "haarcascade_frontalface_alt.xml" ).string() );
	mEyeCascade.load( getAssetPath( "haarcascade_eye.xml" ).string() );	
	
	mCapture = Capture( 640, 480 );
	mCapture.start();
}
Example #3
int main(int argc, char** argv)
{
	// Load the cascade classifiers
	// Make sure you point the XML files to the right path, or 
	// just copy the files from [OPENCV_DIR]/data/haarcascades directory
	face_cascade.load("haarcascade_frontalface_alt2.xml");
	eye_cascade.load("haarcascade_eye.xml");

	// Open webcam
	cv::VideoCapture cap(0);

	// Check if everything is ok
	if (face_cascade.empty() || eye_cascade.empty() || !cap.isOpened())
		return 1;

	// Set video to 320x240
	cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);

	cv::Mat frame, eye_tpl;
	cv::Rect eye_bb;

	while (cv::waitKey(15) != 'q')
	{
		cap >> frame;
		if (frame.empty())
			break;

		// Flip the frame horizontally, Windows users might need this
		cv::flip(frame, frame, 1);

		// Convert to grayscale and
		// adjust the image contrast using histogram equalization
		cv::Mat gray;
		cv::cvtColor(frame, gray, CV_BGR2GRAY);
		cv::equalizeHist(gray, gray);

		if (eye_bb.width == 0 && eye_bb.height == 0)
		{
			// Detection stage
			// Try to detect the face and the eye of the user
			detectEye(gray, eye_tpl, eye_bb);
		}
		else
		{
			// Tracking stage with template matching
			trackEye(gray, eye_tpl, eye_bb);

			// Draw bounding rectangle for the eye
			cv::rectangle(frame, eye_bb, CV_RGB(0,255,0));
		}

		// Display video
		cv::imshow("video", frame);
	}

	return 0;
}
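The detectEye()/trackEye() helpers called above are not shown. A minimal sketch of trackEye(), assuming the usual template-matching approach: search a window around the previous bounding box, take the best match, and reset the box when the match is poor so the detection stage runs again:

void trackEye(cv::Mat& im, cv::Mat& tpl, cv::Rect& rect)
{
	// Search window: twice the size of the last bounding box, clipped to the image
	cv::Rect window(rect.x - rect.width / 2, rect.y - rect.height / 2,
	                rect.width * 2, rect.height * 2);
	window &= cv::Rect(0, 0, im.cols, im.rows);

	cv::Mat dst;
	cv::matchTemplate(im(window), tpl, dst, CV_TM_SQDIFF_NORMED);

	double minval, maxval;
	cv::Point minloc, maxloc;
	cv::minMaxLoc(dst, &minval, &maxval, &minloc, &maxloc);

	if (minval <= 0.2)
	{
		// Good match: move the bounding box to the new location
		rect.x = window.x + minloc.x;
		rect.y = window.y + minloc.y;
	}
	else
	{
		// Lost the eye: zero the box so the main loop re-runs detectEye()
		rect.x = rect.y = rect.width = rect.height = 0;
	}
}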
Example #4
 FaceDetector(){
     if(!face_cascade_.load(face_cascade_name)){
         std::cerr << "failed to load : " << face_cascade_name << std::endl;
     }
     if(!smile_cascade_.load(smile_cascade_name)){
         std::cerr << "failed to load : " << smile_cascade_name << std::endl;
     }
     if(!eyes_cascade_.load(eyes_cascade_name)){
         std::cerr << "failed to load : " << eyes_cascade_name << std::endl;
     }
 }
void ocvFaceDetectApp::setup()
{
#if defined( CINDER_MAC )
	mFaceCascade.load( getResourcePath( "haarcascade_frontalface_alt.xml" ) );
	mEyeCascade.load( getResourcePath( "haarcascade_eye.xml" ) );	
#else
	mFaceCascade.load( getAppPath() + "../../resources/haarcascade_frontalface_alt.xml" );
	mEyeCascade.load( getAppPath() + "../../resources/haarcascade_eye.xml" );	
#endif
	
	mCapture = Capture( 640, 480 );
	mCapture.start();
}
Example #6
int main( int argc, char** argv )
{
	ros::init(argc, argv, "face_det_service");

	if( !face_cascade.load( "/opt/ros/hydro/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml" ) )
	{
		std::cerr << "Error: loading classifier failed" << std::endl;
		return -1;
	}

	ros::NodeHandle n;

	// Subscribe to input camera images
	image_transport::ImageTransport it(n);
	image_transport::Subscriber sub = it.subscribe("image_in", 10, imageCallback);

	// Create a new service server and register the callback
	ros::ServiceServer service = n.advertiseService("detect_faces", detect);

	ROS_INFO_STREAM("Face detection service initialized and listening...");

	ros::spin();

	return 0;
}
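The imageCallback subscribed above is not shown here. A minimal sketch of what it typically does, assuming a global cv::Mat that the detect() service handler reads (the cv_bridge usage is standard; the variable name is an assumption):

#include <cv_bridge/cv_bridge.h>

cv::Mat last_image;  // assumed shared buffer for the detect() service

void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
	try
	{
		// Convert the ROS image message into a BGR cv::Mat copy
		last_image = cv_bridge::toCvCopy(msg, "bgr8")->image;
	}
	catch (cv_bridge::Exception& e)
	{
		ROS_ERROR("cv_bridge exception: %s", e.what());
	}
}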
Example #7
int main()
{
	if (!faceDetector.load("resources/haarcascade_frontalface_alt_tree.xml")) {
		std::cout << "Failed to load classifier" << std::endl;
		getchar();
		return -1;
	}
	cv::VideoCapture capture(CV_CAP_ANY);  //Capture using any camera connected to your system
	if (!capture.isOpened())
		return -1;
	
	char key;
	cv::namedWindow("Camera_Output", 1);    //Create window
	while (true){ //Create infinite loop for live streaming
		cv::Mat frame;
		capture >> frame; //Create image frames from capture

		cv::Mat faceDetectedFrame = detectFace(frame);

		cv::imshow("Camera_Output", faceDetectedFrame);   //Show image frames on created window
		key = cv::waitKey(1);     //Capture Keyboard stroke
		if (char(key) == 27){
			break;      //If you hit ESC key loop will break.
		}
	}

	cv::destroyAllWindows();

	return 0;
}
Example #8
int main(int argc, char **argv){
  std::vector <cv::Mat> images;
  std::vector <int> labels;

  face_cascade.load("config/haarcascade_frontalface_alt.xml");
  read_csv("faces.csv", images, labels);

  return 0;
}
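The read_csv() helper used above is not shown. A minimal sketch following the common OpenCV FaceRecognizer tutorial format, one "path;label" pair per line:

#include <cstdlib>
#include <fstream>
#include <sstream>

void read_csv(const std::string& filename, std::vector<cv::Mat>& images,
              std::vector<int>& labels, char separator = ';')
{
  std::ifstream file(filename.c_str(), std::ifstream::in);
  if (!file) {
    std::cerr << "No valid input file was given." << std::endl;
    return;
  }
  std::string line, path, classlabel;
  while (std::getline(file, line)) {
    std::stringstream liness(line);
    std::getline(liness, path, separator);    // image path before the ';'
    std::getline(liness, classlabel);         // numeric label after it
    if (!path.empty() && !classlabel.empty()) {
      images.push_back(cv::imread(path, 0));  // load as grayscale
      labels.push_back(atoi(classlabel.c_str()));
    }
  }
}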
Example #9
int main( int argc, char** argv )
{
	ros::init(argc, argv, "face_det_node");

	if( !face_cascade.load( "/opt/ros/hydro/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml" ) )
	{
		std::cerr << "Error: loading classifier failed" << std::endl;
		return -1;
	}

	ros::NodeHandle n;

	// Subscribe to input camera images
	image_transport::ImageTransport it(n);
	image_transport::Subscriber sub = it.subscribe("image_in", 10, imageCallback);

	// Advertise detected faces
	pub = n.advertise<workshop_msgs::DetectionsStamped>("face_detections_out", 1000);

	ROS_INFO_STREAM("Face detector initialized and listening...");

	ros::spin();

	return 0;
}
Example #10
bool MainWindow::Init()
{
    if( !face_cascade.load( face_cascade_name) ){
        qDebug()<<"Cascade classifier error: file not found; copy it into the project directory!";
        return false;
    }
    if(m_str_url.isEmpty())
        return false;
    //Open the video stream
    int result=avformat_open_input(&pAVFormatContext, m_str_url.toStdString().c_str(),NULL,NULL);
    if (result<0){
        qDebug()<<"Failed to open the video stream";
        return false;
    }

    //Retrieve the video stream information
    result=avformat_find_stream_info(pAVFormatContext,NULL);
    if (result<0){
        qDebug()<<"Failed to retrieve the video stream information";
        return false;
    }

    //Find the index of the video stream
    videoStreamIndex = -1;
    for (uint i = 0; i < pAVFormatContext->nb_streams; i++) {
        if (pAVFormatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStreamIndex = i;
            break;
        }
    }

    if (videoStreamIndex==-1){
        qDebug()<<"获取视频流索引失败";
        return false;
    }

    //Get the resolution of the video stream
    pAVCodecContext = pAVFormatContext->streams[videoStreamIndex]->codec;
    videoWidth=pAVCodecContext->width;
    videoHeight=pAVCodecContext->height;

    avpicture_alloc(&pAVPicture,AV_PIX_FMT_RGB24,videoWidth,videoHeight);

    AVCodec *pAVCodec;

    //Find the decoder for the video stream
    pAVCodec = avcodec_find_decoder(pAVCodecContext->codec_id);
    pSwsContext = sws_getContext(videoWidth,videoHeight,AV_PIX_FMT_YUV420P,videoWidth,videoHeight,AV_PIX_FMT_RGB24,SWS_BICUBIC,0,0,0);

    //Open the decoder
    result=avcodec_open2(pAVCodecContext,pAVCodec,NULL);
    if (result<0){
        qDebug()<<"打开解码器失败";
        return false;
    }

    qDebug()<<"初始化视频流成功";
    return true;
}
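Init() only prepares the stream; the decode loop itself is not shown on this page. A minimal sketch of what it would look like with the same old FFmpeg API used above (the method name decodeLoop is an assumption), reusing the members initialized in Init():

void MainWindow::decodeLoop()
{
    AVPacket packet;
    AVFrame* pAVFrame = av_frame_alloc();
    int frameFinished = 0;

    while (av_read_frame(pAVFormatContext, &packet) >= 0) {
        if (packet.stream_index == videoStreamIndex) {
            avcodec_decode_video2(pAVCodecContext, pAVFrame, &frameFinished, &packet);
            if (frameFinished) {
                // Convert YUV420P to RGB24 into the AVPicture allocated in Init()
                sws_scale(pSwsContext, pAVFrame->data, pAVFrame->linesize,
                          0, videoHeight, pAVPicture.data, pAVPicture.linesize);
                // pAVPicture.data[0] now holds a videoWidth x videoHeight RGB24
                // image, ready to wrap in a QImage or cv::Mat for face detection
            }
        }
        av_free_packet(&packet);
    }
    av_frame_free(&pAVFrame);
}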
Example #11
/**
 * @function main
 */
int main( int argc, const char** argv ) {
  CvCapture* capture;
  cv::Mat frame;

  // Load the cascades
  if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n"); return -1; };

  cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
  cv::moveWindow(main_window_name, 400, 100);
  cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
  cv::moveWindow(face_window_name, 10, 100);
  cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
  cv::moveWindow("Right Eye", 10, 600);
  cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
  cv::moveWindow("Left Eye", 10, 800);
  cv::namedWindow("aa",CV_WINDOW_NORMAL);
  cv::moveWindow("aa", 10, 800);
  cv::namedWindow("aaa",CV_WINDOW_NORMAL);
  cv::moveWindow("aaa", 10, 800);

  createCornerKernels();
  ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2),
          43.0, 0.0, 360.0, cv::Scalar(255, 255, 255), -1);

   // Read the video stream
  capture = cvCaptureFromCAM( -1 );
  if( capture ) {
    while( true ) {
      frame = cvQueryFrame( capture );
      // mirror it
      cv::flip(frame, frame, 1);
      frame.copyTo(debugImage);

      // Apply the classifier to the frame
      if( !frame.empty() ) {
        detectAndDisplay( frame );
      }
      else {
        printf(" --(!) No captured frame -- Break!");
        break;
      }

      imshow(main_window_name,debugImage);

      int c = cv::waitKey(10);
      if( (char)c == 'c' ) { break; }
      if( (char)c == 'f' ) {
        imwrite("frame.png",frame);
      }

    }
  }

  releaseCornerKernels();

  return 0;
}
Example #12
    void update(double time,
                uint32_t* out,
                const uint32_t* in)
    {
        if (cascade.empty()) {
            cv::setNumThreads(cvRound(threads * 100));
            if (classifier.length() > 0) {
                if (!cascade.load(classifier.c_str()))
                    fprintf(stderr, "ERROR: Could not load classifier cascade %s\n", classifier.c_str());
            }
            else {
                memcpy(out, in, size * 4);
                return;
            }
        }

        // sanitize parameters
        search_scale = CLAMP(search_scale, 0.11, 1.0);
        neighbors = CLAMP(neighbors, 0.01, 1.0);

        // copy input image to OpenCV
        image = cv::Mat(height, width, CV_8UC4, (void*)in);

        // only re-detect periodically to control performance and reduce shape jitter
        int recheckInt = abs(cvRound(recheck * 1000));
        if ( recheckInt > 0 && count % recheckInt )
        {
            // skip detect
            count++;
//            fprintf(stderr, "draw-only counter %u\n", count);
        }
        else
        {
            count = 1;   // reset the recheck counter
            if (objects.size() > 0) // reset the list of objects
                objects.clear();
            double elapsed = (double) cvGetTickCount();

            objects = detect();

            // use detection time to throttle frequency of re-detect vs. redraw (automatic recheck)
            elapsed = cvGetTickCount() - elapsed;
            elapsed = elapsed / ((double) cvGetTickFrequency() * 1000.0);

            // Automatic recheck uses an undocumented negative parameter value;
            // this is outside the declared range, but works in practice.
            if (recheck < 0 && cvRound( elapsed / (1000.0 / (recheckInt + 1)) ) <= recheckInt)
                    count += recheckInt - cvRound( elapsed / (1000.0 / (recheckInt + 1)));
//            fprintf(stderr, "detection time = %gms counter %u\n", elapsed, count);
        }
        
        draw();

        // copy filtered OpenCV image to output
        memcpy(out, image.data, size * 4);
    }
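The CLAMP macro used to sanitize the parameters above is not defined on this page; a definition consistent with its use here would be:

#define CLAMP(x, lo, hi) ((x) < (lo) ? (lo) : ((x) > (hi) ? (hi) : (x)))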
Example #13
void TellThatToMyCamera_v1_0App::setup()
{
    mExpressionsCascade.load(getAssetPath("haarcascade_frontalface_alt.xml").string());
    mPath= getAssetPath("ppdtest.csv").string();
    
	mCapture = Capture( 640, 480 );                 // Camera settings
	mCapture.start();
    
    read_csv(mPath, mDBimgFaces, mDBLabels);        // Read DB of faces for FaceRec algorithm
    mFisherFaceRec->train(mDBimgFaces, mDBLabels);  // Train the Fisher Face Recognizer algorithm
}
Example #14
void FaceFrameCutter::init()
{
	if (!faceCascade_.load(haar_))
	{
		std::cerr<<"--(!)Error loading "<<haar_<<std::endl;
		exit(-1);
	}
	{
		// Clear any pictures left over from a previous run
		string cmd("rm -rf ");
		cmd += outputPitures_;
		system( cmd.c_str());
	}
}
Example #15
/**
 * @function main
 */
int main( int argc, const char** argv ) {
  cv::Mat frame;

  // Load the cascades
  if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n"); return -1; };

  cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
  cv::moveWindow(main_window_name, 400, 100);
  cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
  cv::moveWindow(face_window_name, 10, 100);
  cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
  cv::moveWindow("Right Eye", 10, 600);
  cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
  cv::moveWindow("Left Eye", 10, 800);
  cv::namedWindow("aa",CV_WINDOW_NORMAL);
  cv::moveWindow("aa", 10, 800);
  cv::namedWindow("aaa",CV_WINDOW_NORMAL);
  cv::moveWindow("aaa", 10, 800);

  createCornerKernels();
  ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2),
          43.0, 0.0, 360.0, cv::Scalar(255, 255, 255), -1);

  if( argc < 2 ) {
    printf("usage: %s <image>\n", argv[0]);
    return -1;
  }

  frame = cv::imread(argv[1]);
  frame.copyTo(debugImage);

  cv::Mat result;
  // Apply the classifier to the frame
  if( !frame.empty() ) {
    result = detectAndDisplay( frame );
  }
  else {
    printf(" --(!) Cannot read image -- terminating.\n");
    return -1;
  }

  imshow(main_window_name,debugImage);

  std::stringstream result_filename;
  result_filename << argv[1];
  result_filename << "_eyes.jpg";

  imwrite(result_filename.str().c_str(), result);
  std::cout << "written file: " << result_filename.str() << std::endl;

  releaseCornerKernels();

  return 0;
}
Example #16
int main(void) {
  CGImageRef image;
  cv::Mat frame;

  // 1. Load the cascades
  if (!face_cascade.load(face_cascade_name)) {
    printf("Error loading face cascade\n"); 
    return -1;
  }

  if (!eyes_cascade.load(eyes_cascade_name)) {
    printf("Error loading eyes cascade\n"); 
    return -1;
  }

  // 2. Read the video stream
  
  while (true) {

    image = getDesktopImage();
    frame = CGImageToMat(image);

    if (frame.empty()) {
      printf("No captured frame -- breaking\n");
      break;
    }

    // 3. Apply the classifier to the frame
    detectAndDisplay(frame);

    int c = cv::waitKey(10);
    if ((char)c == 27) { break; } // escape
  }

  return 0;
}
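The CGImageToMat() helper called above is not shown. A minimal sketch (an assumption, not the author's code): draw the CGImage into a CGBitmapContext whose backing buffer is the cv::Mat's data, producing an RGBA matrix:

cv::Mat CGImageToMat(CGImageRef image)
{
  size_t width  = CGImageGetWidth(image);
  size_t height = CGImageGetHeight(image);

  cv::Mat mat((int)height, (int)width, CV_8UC4);  // 8-bit, 4 channels (RGBA)

  CGColorSpaceRef colorSpace = CGColorSpaceCreateDeviceRGB();
  CGContextRef context = CGBitmapContextCreate(
      mat.data, width, height, 8, mat.step[0], colorSpace,
      kCGImageAlphaNoneSkipLast | kCGBitmapByteOrderDefault);

  // Render the desktop image straight into the Mat's pixel buffer
  CGContextDrawImage(context, CGRectMake(0, 0, width, height), image);

  CGContextRelease(context);
  CGColorSpaceRelease(colorSpace);
  return mat;
}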
Example #17
// Main program.
int main(int argc, char** argv){

	// Required variables.
	cv::VideoCapture camera;
	int imagenes_generadas = 0;
	std::string nombre_base = "frame";

	// Open the auxiliary video device.
	camera = cv::VideoCapture(1);

	// If it could not be opened, fall back to the default device.
	if( !camera.isOpened() ){
		std::cerr << "WARNING: Could not open the USB camera... Opening the default camera...\n";
		camera = cv::VideoCapture(0);
		if( !camera.isOpened() ){
			std::cerr << "ERROR: Could not open the default camera...\n";
			return -1;
		}
	}

	if(!hand_detector.load("../data/hands_toy.xml")){
		std::cerr << "Could not load the classifier.\n";
		return -1;
	}

	// Capture frames until Esc is pressed (or enough images have been saved).
	while(imagenes_generadas < MAX_IMAGENES){
		cv::Mat frame;
		camera >> frame;

		if( detect_hand(frame) ){
			cv::imwrite( nombre_base + std::to_string(imagenes_generadas)+".png", frame );
			imagenes_generadas++;
		}

		cv::imshow("video stream", frame);
		if(cv::waitKey(30) >= 0)
			break;
	}// End while.
	
	return 0;
}// End main.
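The detect_hand() helper used in the loop above is not shown. A minimal sketch, assuming it just runs the cascade on a grayscale copy and reports whether anything was found:

bool detect_hand(const cv::Mat& frame)
{
	std::vector<cv::Rect> hands;
	cv::Mat gray;
	cv::cvtColor(frame, gray, CV_BGR2GRAY);
	cv::equalizeHist(gray, gray);
	hand_detector.detectMultiScale(gray, hands, 1.1, 3, 0, cv::Size(30, 30));
	return !hands.empty();
}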
Example #18
int main() {
  RASPIVID_CONFIG* config = (RASPIVID_CONFIG*)malloc(sizeof(RASPIVID_CONFIG));
  config -> width = 320;
  config -> height = 240;
  config -> bitrate = 0;
  config -> framerate = 0;
  config -> monochrome = 1;

  RaspiCamCvCapture * camera = raspiCamCvCreateCameraCapture2(0, config);
  free(config);

  IplImage* image = raspiCamCvQueryFrame(camera);
  cvNamedWindow("Display", CV_WINDOW_AUTOSIZE);

  if (faceCascade.load(faceCascadePath)) {
    display(image);
  }

  return 0;
}
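The display() function is not shown on this page. A minimal sketch of what it could look like (an assumption): wrap the IplImage in a cv::Mat header, run the cascade, and show the detections. A live version would loop on raspiCamCvQueryFrame(), which requires the capture handle to be global or passed in:

void display(IplImage* image)
{
  cv::Mat frame = cv::cvarrToMat(image);  // header only, no pixel copy
  std::vector<cv::Rect> faces;
  faceCascade.detectMultiScale(frame, faces, 1.2, 3, 0, cv::Size(40, 40));
  for (size_t i = 0; i < faces.size(); i++)
    cv::rectangle(frame, faces[i], cv::Scalar(255), 2);  // white box on mono image
  cv::imshow("Display", frame);
  cv::waitKey(0);
}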
Example #19
void init(){
  // Load the cascades
  if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n");};

  cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
  cv::moveWindow(main_window_name, 400, 100);
  cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
  cv::moveWindow(face_window_name, 10, 100);
  cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
  cv::moveWindow("Right Eye", 10, 600);
  cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
  cv::moveWindow("Left Eye", 10, 800);
  
  createCornerKernels();
  ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2),
          43.0, 0.0, 360.0, cv::Scalar(255, 255, 255), -1);

   // Read the video stream
  capture.open( -1 );

}
Example #20
/**
 * @function fd_init
 */
int fd_init( ) 
{
  // Load the cascades
  if( !face_cascade.load( face_cascade_name ) ){ 
	printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n"); return -1; };

  cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
  cv::moveWindow (main_window_name, 400, 100);
  cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
  cv::moveWindow (face_window_name, 10, 100);
#ifdef SHOW_EYES
  cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
  cv::moveWindow ("Right Eye", 10, 600);
  cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
  cv::moveWindow ("Left Eye", 10, 800);
#endif

  createCornerKernels();
  ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2),
          43.0, 0.0, 360.0, cv::Scalar(255, 255, 255), -1);
	printf("fd_init() done \n" );
}
Example #21
void ICPApp::setup()
{
    mExpressionsCascade.load(getAssetPath("haarcascade_frontalface_alt.xml").string());
    mPath= getAssetPath("ppdtest.csv").string();
    
	mCapture = Capture( 640, 480 );                 // Camera settings
	mCapture.start();
    
    read_csv(mPath, mDBimgFaces, mDBLabels);        // Read DB of faces for FaceRec algorithm
    mFisherFaceRec->train(mDBimgFaces, mDBLabels);  // Train the Fisher Face Recognizer algorithm
    
    // FOR TESTING PURPOSES
    //    mSurf=(loadImage("/Users/PpD/Desktop/EcA - Pp DanY/MSc ICP/Semester 2/ICP 3/Faces DB Original/hugh_laurie_extra1.jpg"));
    //  mTexture = gl::Texture(mCinderDBimgFaces);
    //  mTexture = gl::Texture( fromOcv( input ) );
    //  cv::Mat output;
    //  mTexture = gl::Texture( fromOcv( loadImage("/Users/PpD/Desktop/emotionsrec2/data/emotions/0neutral/amy_adams_neutral.jpg") ) );    
    //  mDBLabelsTEST.push_back(0);
    //  mDBLabelsTEST.push_back(1);
    //  mFisherFaceRec->train(mDBimgFaces, mDBLabelsTEST);
    //  mFisherFaceRec->train(mDBimgFaces, mDBLabels);
}
Example #22
void init()
{
    //Camera
    camera.open(0);

    if(!camera.isOpened())
    {
        std::cout << "Can not find camera!" << std::endl;
        exit(-1);
    }

    camera.set(cv::CAP_PROP_FRAME_WIDTH,160);
    camera.set(cv::CAP_PROP_FRAME_HEIGHT,120);

/*    double width = camera.get(cv::CAP_PROP_FRAME_WIDTH);
    double height = camera.get(cv::CAP_PROP_FRAME_HEIGHT);

    std::cout << "width:" << width << "\t";
    std::cout << "height:" << height << "\t" << std::endl;*/

    face_cascade_name = "/usr/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";

    if(!face_cascade.load(face_cascade_name))
    {
        std::cout << "can not find face_cascade_file!" << std::endl;
        exit(-1);
    }

    running = true;
    pthread_mutex_init(&mutexLock, NULL);
    pthread_create(&grabThread, NULL, grabFunc, NULL);

    //Setup edi robot mraa control lib
    spider_init();

    signal(SIGINT, clear);
    signal(SIGTERM, clear);
}
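The grabFunc() thread body started above is not shown. A minimal sketch, assuming a shared cv::Mat protected by the mutex created in init() (the frame variable name is an assumption):

#include <unistd.h>

cv::Mat frame;  // assumed shared buffer, read by the main loop under the same lock

void* grabFunc(void*)
{
    while (running)
    {
        pthread_mutex_lock(&mutexLock);
        camera >> frame;               // always keep the newest camera image
        pthread_mutex_unlock(&mutexLock);
        usleep(10000);                 // brief pause so the lock is not held hot
    }
    return NULL;
}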
Example #23
int main( int argc, char** argv )
{
  ros::init(argc, argv, "face_detect");
  ros::NodeHandle n;
  
  opencvCommands = n.advertise<robotBrain::opencvMessage>("opencv_commands",1000);
  image_transport::ImageTransport it(n);
  pub = it.advertise("camera/image", 1);
  
  cv::VideoCapture cap(1);
  
  if(!cap.isOpened()){
		ROS_FATAL("opencv:  COULD NOT OPEN CAMERA" );
		sendError();
	}
  
  //Load the cascades
	
  if( !face_cascade.load( face_cascade_name ) ){ 
    ROS_FATAL("--(!)Error loading FACE CASCADE(!)--" ); 
    sendError(); 
  }
  if( !eyes_cascade.load( eyes_cascade_name ) ){ 
    ROS_FATAL("--(!)Error loading EYE CASCADE(!)--"); 
    sendError(); 
  }
  
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
  char key;
    while ( n.ok() ){
      cap.read(frame);
      if( !frame.empty() ) colorDetect(); //detectAndDisplay();
      else {
        ROS_FATAL("opencv: FRAME FAIL " );
        sendError();
      }
      //showing image
      cv::namedWindow( "Patrolling Android View", CV_WINDOW_AUTOSIZE );
      cv::startWindowThread();
      cv::imshow( "Patrolling Android View", imgHSV);
      if (color && !face) faceDetect();   //if we have a color and we haven't previously detected a face, we look for a face

      if(face) personTracking();          //if we have a face we follow the color
      else {                              //if not
        message.camera = 's';
        message.errorOpenCV = '0';
        opencvCommands.publish( message );
      }
      key = cv::waitKey(100);
      switch (key)
        {
            //hue
            case 'q':
                if (hue_low == hue_high)
                    ROS_INFO("hue_low must be less than hue_high");
                else 
                    hue_low = hue_low + 1;
            break;
            case 'a':
                if (hue_low == 0) 
                    ROS_INFO("Hue is minimum");
                else
                    hue_low = hue_low - 1;
            break;
            case 'w':
                if (hue_high == 255)
                    ROS_INFO("Hue is maximum");
                else
                    hue_high = hue_high + 1;
            break;
            case 's':
                if (hue_high == hue_low)
                    ROS_INFO("hue_high must be greater than hue_low");
                else
                    hue_high = hue_high - 1;
            break;
           
            //saturation 
            case 'e':
                if (sat_low == sat_high)
                    ROS_INFO("sat_low must be less than sat_high");
                else 
                    sat_low = sat_low + 1;
            break;
            case 'd':
                if (sat_low == 0) 
                    ROS_INFO("sat is minimum");
                else
                    sat_low = sat_low - 1;
            break;
            case 'r':
                if (sat_high == 255)
                    ROS_INFO("sat is maximum");
                else
                    sat_high = sat_high + 1;
            break;
            case 'f':
                if (sat_high == sat_low)
                    ROS_INFO("sat_high must be greater than sat_low");
                else
                    sat_high = sat_high - 1;
            break;
            
            //value 
            case 't':
                if (val_low == val_high)
                    ROS_INFO("val_low must be less than val_high");
                else 
                    val_low = val_low + 1;
            break;
            case 'g':
                if (val_low == 0) 
                    ROS_INFO("val is minimum");
                else
                    val_low = val_low - 1;
            break;
            case 'y':
                if (val_high == 255)
                    ROS_INFO("val is maximum");
                else
                    val_high = val_high + 1;
            break;
            case 'h':
                if (val_high == val_low)
                    ROS_INFO("val_high must be greater than val_low");
                else
                    val_high = val_high - 1;
            break;
        }
      //ROS_INFO("Frames");
	ROS_INFO("Hue: %d-%d\tSat: %d-%d\tVal: %d-%d\n", hue_low, hue_high, sat_low, sat_high, val_low, val_high);
   }
  ROS_FATAL("COULD NOT CAPTURE FRAME");
  return -1;
}
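colorDetect() and the global imgHSV it fills are not shown. A minimal sketch of the thresholding step the keyboard handler above tunes (the countNonZero heuristic for setting the color flag is an assumption):

void colorDetect()
{
  cv::Mat hsv;
  cv::cvtColor(frame, hsv, CV_BGR2HSV);
  // Keep only the pixels inside the hue/sat/val window tuned via the keyboard
  cv::inRange(hsv,
              cv::Scalar(hue_low, sat_low, val_low),
              cv::Scalar(hue_high, sat_high, val_high),
              imgHSV);
  color = (cv::countNonZero(imgHSV) > 1000);  // assumed "enough pixels" heuristic
}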
Example #24
/**
 * @function main
 */
int main( int argc, char **argv )
{
    // OPENCV INIT
    //-- Load the cascades
    if( !face_cascade.load( face_cascade_name ) )
    {
        fprintf(stderr,"-- (!) ERROR loading 'haarcascade_frontalface_alt.xml'\n");
        fprintf(stderr,"Please edit 'face_cascade_name' path in main.cpp:22 and recompile the project.\n");
        return -1;
    };

    // VIDEO CAPTURE
    //-- start video capture from camera
    capture = new cv::VideoCapture(0);

    //-- check that video is working
    if ( capture == NULL || !capture->isOpened() ) {
        fprintf( stderr, "Could not start video capture\n" );
        return 1;
    }

    // CAMERA IMAGE DIMENSIONS
    camWidth = (int) capture->get( CV_CAP_PROP_FRAME_WIDTH );
    camHeight = (int) capture->get( CV_CAP_PROP_FRAME_HEIGHT );

    // GLUT INIT
    glutInit( &argc, argv );
    glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH);
    if(!bFullScreen){
        windowWidth = camWidth*1.5;
        windowHeight = camHeight*1.5;
        cx = pixelToCm(windowWidth);
        cy = pixelToCm(windowHeight);
        glutInitWindowPosition( 200, 80 );
        glutInitWindowSize( windowWidth, windowHeight );
    }
    glutCreateWindow( "ScreenReality - Vision 3D" );

    // RENDERING INIT
    glEnable(GL_DEPTH_TEST);
    glEnable(GL_COLOR_MATERIAL);
    glEnable(GL_LIGHT0); //Enable light #0
    glEnable(GL_LIGHT1); //Enable light #1
    glEnable(GL_NORMALIZE); //Automatically normalize normals

    // SCREEN DIMENSIONS
    if(bFullScreen)
    {
        glutFullScreen();
        windowWidth = glutGet(GLUT_SCREEN_WIDTH);
        windowHeight = glutGet(GLUT_SCREEN_HEIGHT);
        cx = pixelToCm(windowWidth);
        cy = pixelToCm(windowHeight);
        glViewport( 0, 0, windowWidth, windowHeight );
    }

    // GUI CALLBACK FUNCTIONS
    glutDisplayFunc( redisplay );
    glutReshapeFunc( onReshape );
    glutMouseFunc( onMouse );
    glutKeyboardFunc( onKeyboard );
    glutIdleFunc( onIdle );

    // GUI LOOP
    glutMainLoop();

    return 0;
}
Example #25
int main(int argc, char **argv)
{
    ros::init(argc, argv, "people_counting");
    ros::NodeHandle node;

    ros::param::param<std::string>(ros::this_node::getName() + "/dataset", fileDataset_param, "haarcascade_upperbody.xml");
    ros::param::param<double>(ros::this_node::getName() + "/scaleFactor", scaleFactor_param, 0.1);
    ros::param::param<int>(ros::this_node::getName() + "/minNeighbors", minNeighbors_param, 0);
    // ros::param::param<double>("minSize", minSize_param, 0);
    ros::param::param<double>(ros::this_node::getName() + "/maxSize", maxSize_param, 1.0);

    // argc--; argv++;
    // while( argc && *argv[0] == '-' )
    // {
    //     if( !strcmp(*argv, "-fps") && ( argc > 1 ) )
    //     {
    //         fps = atof(*(argv+1));
    //         printf("With %ffps\n",fps);
    //         argc--; argv++;
    //     }
    //     if( !strcmp(*argv, "-crowd_dataset") && ( argc > 1 ) )
    //     {
    //         file_dataset = *(argv+1);
    //         printf("Using dataset form %s\n",file_dataset.c_str());
    //         argc--; argv++;
    //     }
    //     argc--; argv++;
    // }

    std::vector<cv::Rect> objects;
    std::vector<cv::Scalar> colors;
    cv::RNG rng(12345);

    img_msg.encoding = "bgr8";

    if ( ! classifier.load(fileDataset_param))
        ROS_ERROR("Can't open %s", fileDataset_param.c_str());

    ros::Publisher img_pub = node.advertise<sensor_msgs::Image>("crowd/count_img", 100);
    ros::Publisher count_pub = node.advertise<std_msgs::UInt16>("crowd/people_count", 100);

    image_transport::ImageTransport it(node);
    image_transport::Subscriber img_sub = it.subscribe("image", 1, imageCallback);

    ros::Rate loop_rate(fps);

    while (ros::ok())
    {
        if (! img_msg.image.empty()) {
            classifier.detectMultiScale(img_msg.image, objects, 1.0+scaleFactor_param, minNeighbors_param+1, 0, cv::Size(), maxSize);
            for (size_t j = 0; j < objects.size(); ++j) {
                if ( j > max_detected) {
                    colors.push_back(cv::Scalar(rng.uniform(0,255),rng.uniform(0, 255),rng.uniform(0, 255)));
                    max_detected = j;
                }
                cv::rectangle(img_msg.image, objects[j], colors[j], 2);
            }
            people_count.data = (uint16_t)objects.size();
        }
        img_pub.publish(img_msg.toImageMsg());
        count_pub.publish(people_count);

        ros::spinOnce();
        loop_rate.sleep();
    }

    return 0;
}
Example #26
void scanWebcam()
{
	//Check that the cascade file was loaded
	if(!face_cascade1.load( face_cascade_name1 ))
	{
		cout << "Error while loading cascade files" << endl;                            
	} 

	CvCapture* capture;
	cv::Mat frame;

	//Connect to the video stream
	capture = cvCaptureFromFile(PATH_TO_CAM);

	//If the connection was successful 
	if(capture)
	{
		//Create a FaceRecognizer object that uses the Fisherfaces algorithm (also works with the eigenfaces and LBPH algorithms)
		cv::Ptr<cv::FaceRecognizer> fisherfaces = cv::createFisherFaceRecognizer();

		//Load the database that was previously created during the training phase
		cv::FileStorage fs_fisher(PATH_TO_XML_FISHERFACES, cv::FileStorage::READ);
		fisherfaces->load(fs_fisher);

		//Infinite loop to detect the faces continuously
		while(true)
		{
			//Get one picture from the videostream (The facial recognition is done on images from the video and not directly from the videostream)
			frame = cvQueryFrame( capture );
			cv::namedWindow("test");

			//Check that one image was successfully extracted from the video
			if(!frame.empty())
			{
				//Variables used for the id process
				int predictedLabel = -1;
				double predictedConfidence = 0.0;

				std::vector<cv::Rect> faces; //Contains the rectangle coordinates in which the face will be included
				cv::Mat frame_gray; //Grey image
				cvtColor( frame, frame_gray, CV_RGB2GRAY ); //Converts the image from RGB to shades of grey
				equalizeHist( frame_gray, frame_gray ); //Histogram equalization
				
				//We perform a face detection
				face_cascade1.detectMultiScale( frame_gray, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, cv::Size(30, 30) );
				

				//If at least one face was detected then we can perform an identification
				for(int i=0; i<faces.size();i++)
				{
					//Get only (crop) the face (shades of grey)
					cv::Mat croppedFace = frame_gray(cv::Rect(faces[i].x, faces[i].y, faces[i].width, faces[i].height));
					//Resize the image
					cv::resize(croppedFace, croppedFace, sizeOfImage);
					
					//Start the identification
					fisherfaces->predict(croppedFace, predictedLabel, predictedConfidence);
					
					//Print the result in the console
					cout << "##### ID " << predictedLabel << "    confidence : " << predictedConfidence;

					int id=predictedLabel;
					const int THRESHOLD = 1000; //Threshold for the facial recognition. Used to make sure that the face was properly recognized.

					string printedName;

					cv::Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );

					//Print the ID result on the video (doing it inline like this is ugly; a helper function should be created, see the sketch after this example)
					if(id==1 && predictedConfidence>THRESHOLD)
					{
						printedName="Adrien";
						//Print the circle around the face
						ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar(0,255,0), 4, 8, 0);
						//Print the person's name
						cv::putText(frame,printedName, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, cv::Scalar(0,255,0), 2, 8, false );
					}
					else if(id==2 && predictedConfidence>THRESHOLD)
					{
						printedName="Ofir";
						ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar(0,255,0), 4, 8, 0);
						cv::putText(frame,printedName, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, cv::Scalar(0,255,0), 2, 8, false );
					}
					else if(id==3 && predictedConfidence>THRESHOLD)
					{
						printedName="Jeremie";
						ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar(0,255,0), 4, 8, 0);
						cv::putText(frame,printedName, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, cv::Scalar(0,255,0), 2, 8, false );
					}
					else
					{
						printedName="UNKNOWN";
						ellipse( frame, center, cv::Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, cv::Scalar(0,0,255), 4, 8, 0);
						cv::putText(frame,printedName, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, cv::Scalar(0,0,255), 2, 8, false );
					}
					
				}
				cout << endl;

				//Print each images to recreate a video
				cv::imshow("test", frame);
			}	
			else
			{
				cout << " --(!) No captured frame -- Break!" << endl;
				break;
			}

			cv::waitKey(10); //Give HighGUI time to process the draw requests
		}

	}
}
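The sketch promised in the comment above: a small helper that removes the duplicated ellipse/putText branches (the name and signature are assumptions). Each branch in the loop then collapses to a single call such as drawRecognition(frame, faces[i], "Adrien", id==1 && predictedConfidence>THRESHOLD).

static void drawRecognition(cv::Mat& frame, const cv::Rect& face,
                            const std::string& name, bool recognized)
{
	cv::Point center( face.x + face.width*0.5, face.y + face.height*0.5 );
	// Green for a recognized face, red for an unknown one
	cv::Scalar color = recognized ? cv::Scalar(0,255,0) : cv::Scalar(0,0,255);
	ellipse( frame, center, cv::Size( face.width*0.5, face.height*0.5), 0, 0, 360, color, 4, 8, 0);
	cv::putText(frame, name, center, cv::FONT_HERSHEY_SIMPLEX, 1.0f, color, 2, 8, false );
}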
Example #27
// Main function, defines the entry point for the program.
int main( int argc, char** argv )
{
    cv::VideoCapture capture;
    cv::Mat frame;
    if( !cascade.load(cascade_name) ) {
        std::cout << "Error loading cascade" << std::endl;
        return -1;
    }
    
    // This works on a D-Link CDS-932L
    const std::string videoStreamAddress = "http://192.168.1.253/nphMotionJpeg?Resolution=320x240&Quality=Standard&.mjpg";

    //open the video stream and make sure it's opened
    if(!capture.open(videoStreamAddress)) {
        std::cout << "Error opening video stream or file" << std::endl;
        return -1;
    }

    // Create a new named window with title: Result
    cvNamedWindow( "Result", 1 );

    // Capture from the camera in an endless loop.
    for(;;)
    {   
        cv::Mat gray;
        for(int i=0;i<10;i++)
            capture.grab();
        capture >> frame;
        cv::cvtColor(frame, gray, CV_BGR2GRAY);

        std::vector<cv::Rect> results;
        cascade.detectMultiScale(gray, results, 1.1, 3, 0);
        int dx=0;
        int dy=0;
        for(std::vector<cv::Rect>::iterator it=results.begin();
        it!=results.end();
        it++)
        {
          cv::Rect r = *it;
          std::cout << "_" << r.x << "_\t_" << r.y << "_\t" << std::endl;
          dx = r.x + r.width/2;
          dy = r.y + r.height/2;
        }
        std::stringstream ss;
        ss << "wget -q -O /dev/null \"http://192.168.1.253/nphControlCamera?Width=";
        ss << gray.cols;
        ss << "&Height=";
        ss << gray.rows;
        ss << "&Direction=Direct&NewPosition.x=" << dx;
        ss << "&NewPosition.y=" << dy<<"\"";
        if((dx!=0)||(dy!=0))
        {
            std::cout << ss.str() << std::endl;
            system(ss.str().c_str());
                
        }
        imshow("Result",gray);
        // Wait for a while before proceeding to the next frame
        if( cvWaitKey( 10 ) >= 0 )
            break;
        
    }

    return 0;
}
Example #28
/*
 * Class:     io_github_melvincabatuan_fullbodydetection_MainActivity
 * Method:    predict
 * Signature: (Landroid/graphics/Bitmap;[B)V
 */
JNIEXPORT void JNICALL Java_io_github_melvincabatuan_fullbodydetection_MainActivity_predict
  (JNIEnv * pEnv, jobject clazz, jobject pTarget, jbyteArray pSource){

   AndroidBitmapInfo bitmapInfo;
   uint32_t* bitmapContent; // Links to Bitmap content

   if(AndroidBitmap_getInfo(pEnv, pTarget, &bitmapInfo) < 0) abort();
   if(bitmapInfo.format != ANDROID_BITMAP_FORMAT_RGBA_8888) abort();
   if(AndroidBitmap_lockPixels(pEnv, pTarget, (void**)&bitmapContent) < 0) abort();

   /// Access source array data... OK
   jbyte* source = (jbyte*)pEnv->GetPrimitiveArrayCritical(pSource, 0);
   if (source == NULL) abort();

   /// cv::Mat for YUV420sp source and output BGRA 
    Mat srcGray(bitmapInfo.height, bitmapInfo.width, CV_8UC1, (unsigned char *)source);
    Mat mbgra(bitmapInfo.height, bitmapInfo.width, CV_8UC4, (unsigned char *)bitmapContent);

/***********************************************************************************************/
    /// Native Image Processing HERE... 
    if(DEBUG){
      LOGI("Starting native image processing...");
    }

    if (full_body_cascade.empty()){
       t = (double)getTickCount();
       sprintf( full_body_cascade_path, "%s/%s", getenv("ASSETDIR"), "haarcascade_fullbody.xml");       
    
      /* Load the full body cascade */
       if( !full_body_cascade.load(full_body_cascade_path) ){ 
           LOGE("Error loading full body cascade"); 
           abort(); 
       }

       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
       LOGI("Loading full body cascade took %lf milliseconds.", t);
     }
    }
            
 
     std::vector<Rect> fbody;


       //-- Detect full body
       t = (double)getTickCount();
 
       /// Detection took cat_face_cascade.detectMultiScale() time = 655.334471 ms
      // cat_face_cascade.detectMultiScale( srcGray, faces, 1.1, 2 , 0 , Size(30, 30) ); // Scaling factor = 1.1;  minNeighbors = 2 ; flags = 0; minimumSize = 30,30

      // cat_face_cascade.detectMultiScale() time = 120.117185 ms
      // cat_face_cascade.detectMultiScale( srcGray, faces, 1.2, 3 , 0 , Size(64, 64));

 
      
      full_body_cascade.detectMultiScale( srcGray, fbody, 1.2, 2 , 0 , Size(14, 28));  // Size(double width, double height) 

      // scalingFactor parameters determine how much the classifier will be scaled up after each run.
      // minNeighbors parameter specifies how many positive neighbors a positive face rectangle should have to be considered a possible match; 
      // when a potential face rectangle is moved a pixel and does not trigger the classifier any more, it is most likely that it’s a false positive. 
      // Face rectangles with fewer positive neighbors than minNeighbors are rejected. 
      // If minNeighbors is set to zero, all potential face rectangles are returned. 
      // The flags parameter is from the OpenCV 1.x API and should always be 0. 
      // minimumSize specifies the smallest face rectangle we’re looking for. 

       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
          LOGI("full_body_cascade.detectMultiScale() time = %lf milliseconds.", t);
      }


       // Iterate through all detected bodies and draw an ellipse around each
       t = (double)getTickCount();

       for( size_t i = 0; i < fbody.size(); i++ )
       {
          Point center(fbody[i].x + fbody[i].width / 2, fbody[i].y + fbody[i].height / 2);
          ellipse(srcGray, center, Size(fbody[i].width / 2, fbody[i].height / 2), 0, 0, 360, Scalar(255, 0, 255), 4, 8, 0);
       }//endfor
  
       t = 1000*((double)getTickCount() - t)/getTickFrequency();
       if(DEBUG){
          LOGI("Iterate through all faces and detecting eyes took %lf milliseconds.", t);
       }

       /// Display to Android
       cvtColor(srcGray, mbgra, CV_GRAY2BGRA);


      if(DEBUG){
        LOGI("Successfully finished native image processing...");
      }
   
/************************************************************************************************/ 
   
   /// Release Java byte buffer and unlock backing bitmap
   pEnv-> ReleasePrimitiveArrayCritical(pSource,source,0);
   if (AndroidBitmap_unlockPixels(pEnv, pTarget) < 0) abort();
}
Example #29
/**
 * @function main
 */
int main( int argc, const char** argv ) {

	srand(time(NULL)); int ra;

/*	char circ_window[] = "Moving dot";

	Mat circ_image = Mat::zeros( 400, 400, CV_8UC3 );
	MyFilledCircle( circ_image, Point( 100, 100) );
	imshow( circ_window, circ_image );
	cv::setWindowProperty( circ_window, CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
	moveWindow( circ_window, 900, 200 );*/
	//sleep(8);

	CvCapture* capture;
	cv::Mat frame;
	
	// Load the cascades
	if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n"); return -1; };

	cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
	cv::moveWindow(main_window_name, 400, 100);
	cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
	cv::moveWindow(face_window_name, 10, 100);
	cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
	cv::moveWindow("Right Eye", 10, 600);
	cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
	cv::moveWindow("Left Eye", 10, 800);
	cv::namedWindow("aa",CV_WINDOW_NORMAL);
	cv::moveWindow("aa", 10, 800);
	cv::namedWindow("aaa",CV_WINDOW_NORMAL);
	cv::moveWindow("aaa", 10, 800);

	createCornerKernels();
	ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2),
			43.0, 0.0, 360.0, cv::Scalar(255, 255, 255), -1);

	// Read the video stream
	capture = cvCaptureFromCAM( -1 );
	if( capture ) {
		while( true ) {
	
	char circ_window[] = "Moving dot";

	Mat circ_image = Mat::zeros( 414, 414, CV_8UC3 );
	//ra = rand()%4;
	//if (ra==1) rx+=1; else if(ra==2) rx-=1; else if(ra==3) ry+=1; else ry-=1; rx+=1; if(rx==500) rx=0;

	if(stage1 && !stage2)
		if(rx>=6 && rx <=400 && ry==6)
		{
			rx+= 10;
			tl+= lpy;
			tr+= rpy;
			countert ++;
		}
		else if(rx>=400 && ry<400)
		{
			ry+=10;
			rl+= lpx;
			rr+= rpx;
			counterl ++;
		}
		else if(ry>=400 && rx > 6)
		{
			rx-=10;
			bl+= lpy;
			br+= rpy;
		}
		else if(rx<=6 && ry>20)
		{
			ry-=10;
			ll+= lpx;
			lr+= rpx;
		}
		else if(rx <= 6 && ry <= 20 && ry > 6)
		{
			stage1 = 0;
			stage2 = 1;
		}
	if(!stage1 && stage2)
	{
		tal = tl / countert;
		tar = tr / countert;
		bar = br / countert;
		bal = bl / countert;
		lal = ll / (counterl-1);
		lar = lr / (counterl-1);
		ral = rl / counterl;
		rar = rr / counterl;
		std::cout<<tal<<" : "<<tar<<" : "<<lal<<" : "<<lar<<std::endl;
		std::cout<<ral<<" : "<<rar<<" : "<<bal<<" : "<<bar<<std::endl;
		stage2 = 0;
		rx=200;ry=200;
	}

	if(!stage1 && !stage2)
	{
		if( //lpx >= lal && 
			lpx <= ral &&
			//rpx >= lar &&
			rpx <= rar &&
			//lpy >= tal &&
			lpy <= bal &&
			//rpy >= tar &&
			rpy <= bar )
			std::cout<<"INSIDE\n";
		else
			std::cout<<"OUTSIDE\n";
	}

	/*	if(rx<200 ) {rx++; px=100; py=100;}
		else if(rx<400) {rx++; px=200; py=300;}
		else if(rx < 600) {rx++; px=400; py =200;}
		else rx=0;*/
	arr[rx][ry][0]=lpx1; arr[rx][ry][1]=lpy1;

	//int px,py;
	MyFilledCircle( circ_image, Point( rx, ry) );
	setWindowProperty( circ_window, CV_WND_PROP_FULLSCREEN, CV_WINDOW_FULLSCREEN);
	imshow( circ_window, circ_image );
	moveWindow( circ_window, 00, 00 );




	frame = cvQueryFrame( capture );
	// Optionally reduce the resolution of the image to increase speed
	// (a scale factor of 1, as used here, keeps the original size)
	cv::Mat smallFrame;
	cv::resize(frame,smallFrame,cv::Size(round(1*frame.cols), round(1*frame.rows)),1,1,cv::INTER_LANCZOS4);

	// mirror it
	cv::flip(smallFrame, smallFrame, 1);
	smallFrame.copyTo(debugImage);

	// Apply the classifier to the frame
	if( !smallFrame.empty() ) {
		detectAndDisplay( smallFrame );
	}
	else {
		printf(" --(!) No captured frame -- Break!");
		break;
	}

	imshow(main_window_name,debugImage);

	int c = cv::waitKey(10);
	if( (char)c == 'c' ) { break; }
	if( (char)c == 'f' ) {
		imwrite("frame.png",smallFrame);
	}

		}
	}

	releaseCornerKernels();

	return 0;
}
Example #30
/*
 * @function main
 */
int main( int argc, const char** argv ) {

  // Get the mode
  if (argc > 1)
  {
    const char *inputMode = argv[1];
    if (strcmp(inputMode, "normal") == 0) {
      mode = NORMAL;
    } else if (strcmp(inputMode, "debug") == 0) {
      mode = DEBUG;
    } else if (strcmp(inputMode, "plot") == 0) {
      mode = PLOT;
    } else {
      mode = NORMAL;
    }
  }
  else
  {
    mode = NORMAL;
  }

  if (mode == NORMAL) {
    eventHandler = EventHandler();
  }

  if (mode == DEBUG || mode == NORMAL) {
    printf("Input Mode: %s\n", mode == NORMAL ? "normal" :
        mode == DEBUG ? "debug" :
        mode == PLOT ? "plot" : "none");

    cv::namedWindow(main_window_name,CV_WINDOW_NORMAL);
    cv::moveWindow(main_window_name, 400, 100);
    cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
    cv::moveWindow(face_window_name, 10, 100);
    cv::namedWindow("Right Eye",CV_WINDOW_NORMAL);
    cv::moveWindow("Right Eye", 10, 600);
    cv::namedWindow("Left Eye",CV_WINDOW_NORMAL);
    cv::moveWindow("Left Eye", 10, 800);
  } else if (mode == PLOT) {
    cv::namedWindow(face_window_name,CV_WINDOW_NORMAL);
    cv::moveWindow(face_window_name, 400, 100);
  }

  cv::Mat frame;

  // Load the cascades
  if( !face_cascade.load( FACE_CASCADE_FILE ) ){ printf("--(!)Error loading face cascade, please change FACE_CASCADE_FILE in source code.\n"); return -1; };


  // Read the video stream
  cv::VideoCapture capture( 0 );
  if( capture.isOpened() ) {
    capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    capture.set(CV_CAP_PROP_FPS, 15);
    capture >> frame;
    while( true ) {
      capture >> frame;

      // mirror it
      cv::flip(frame, frame, 1);
      frame.copyTo(debugImage);

      // Apply the classifier to the frame
      if( !frame.empty() ) {
        detectAndDisplay( frame );
      }
      else {
        printf(" --(!) No captured frame -- Break!");
        break;
      }
      if (mode == DEBUG || mode == NORMAL) {
        imshow(main_window_name, debugImage);
      }

      if (mode == DEBUG || mode == PLOT || mode == NORMAL) {
        int c = cv::waitKey(10);
        if( (char)c == 'c' ) { break; }
        if( (char)c == 'f' ) {
          imwrite("frame.png", frame);
        }
      }
    }
  }

  return 0;
}