Code Example #1
File: main.cpp  Project: CoreyXiong/FaceDetect
int main() { 
#ifdef TRAIN
	train();
#else	
	cascadeClassifier cascade = loadBinClassifier();
	while(true) {
		cout << "input name of a picture : " << endl;
		string name;
		cin >> name;
		if(name == "end") {
			break;
		}
		name = "d:\\test_data\\" + name + ".jpg";
		if( (_access( name.c_str(), 0 )) == -1 ) {
			cout << "The file does not exist, please input again" << endl;
			continue;
		}
		IplImage *img = cvLoadImage(name.c_str(), CV_LOAD_IMAGE_UNCHANGED);

		//cout << img->height << ' ' << img->width << endl;
		// clock() returns clock_t ticks; on Windows CLOCKS_PER_SEC is 1000, so the difference is in milliseconds
		clock_t time_start = clock();
		vector<myRect> ans = faceDetect(img, cascade, 3, false, 1.1);
		clock_t time_end = clock();

		printf("Faces found: %d   Detection time: %d ms\n", (int)ans.size(), (int)(time_end - time_start));
		// BGR colors, cycled across detected faces
		CvScalar FaceCirclecolors[] = 
		{
			{{0, 0, 255}},
			{{0, 128, 255}},
			{{0, 255, 255}},
			{{0, 255, 0}},
			{{255, 128, 0}},
			{{255, 255, 0}},
			{{255, 0, 0}},
			{{255, 0, 255}}
		};
		for(int i = 0; i < (int)ans.size(); i++) {
			myRect* r = &ans[i];
			CvPoint center;
			int radius;
			center.x = cvRound((r->x + r->width * 0.5));
			center.y = cvRound((r->y + r->height * 0.5));
			radius = cvRound((r->width + r->height) * 0.25);
			cvCircle(img, center, radius, FaceCirclecolors[i % 8], 2);
		}

		const char *pstrWindowsTitle = "My Face Detection";
		cvNamedWindow(pstrWindowsTitle, CV_WINDOW_AUTOSIZE);
		cvShowImage(pstrWindowsTitle, img);

		cvWaitKey(0);

		cvDestroyWindow(pstrWindowsTitle);
		cvReleaseImage(&img);	
	}
#endif
}
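
A note on this example: it uses OpenCV's legacy C API (IplImage, cvLoadImage, cvCircle), which is deprecated and largely removed in OpenCV 4. Below is a minimal sketch of the same detect-and-draw flow against the modern C++ API; cv::CascadeClassifier stands in for the project's custom loadBinClassifier()/faceDetect() pair, and the model and image paths are placeholder assumptions.

#include <opencv2/objdetect.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <iostream>
#include <vector>

int main() {
	cv::CascadeClassifier cascade;
	if (!cascade.load("haarcascade_frontalface_default.xml")) {	// hypothetical model path
		std::cerr << "Failed to load cascade" << std::endl;
		return 1;
	}
	cv::Mat img = cv::imread("test.jpg");	// hypothetical input path
	if (img.empty()) return 1;

	cv::Mat gray;
	cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);

	// Same parameters as the faceDetect() call above: scale factor 1.1, 3 neighbors
	std::vector<cv::Rect> faces;
	cascade.detectMultiScale(gray, faces, 1.1, 3);

	for (const cv::Rect& r : faces) {
		cv::Point center(r.x + r.width / 2, r.y + r.height / 2);
		int radius = (r.width + r.height) / 4;
		cv::circle(img, center, radius, cv::Scalar(0, 0, 255), 2);	// red circle (BGR)
	}
	cv::imshow("My Face Detection", img);
	cv::waitKey(0);
	return 0;
}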
Code Example #2
int main( int argc, char** argv )
{
  ros::init(argc, argv, "face_detect");
  ros::NodeHandle n;
  
  opencvCommands = n.advertise<robotBrain::opencvMessage>("opencv_commands",1000);
  image_transport::ImageTransport it(n);
  pub = it.advertise("camera/image", 1);
  
  cv::VideoCapture cap(1);
  
  if(!cap.isOpened()){
    ROS_FATAL("opencv: COULD NOT OPEN CAMERA");
    sendError();
  }
  
  // Load the cascades
  if( !face_cascade.load( face_cascade_name ) ){ 
    ROS_FATAL("--(!)Error loading FACE CASCADE(!)--" ); 
    sendError(); 
  }
  if( !eyes_cascade.load( eyes_cascade_name ) ){ 
    ROS_FATAL("--(!)Error loading EYE CASCADE(!)--"); 
    sendError(); 
  }
  
  cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
  cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);

  char key;
  while ( n.ok() ){
    cap.read(frame);
    if( !frame.empty() ) colorDetect();	//detectAndDisplay();
    else {
      ROS_FATAL("opencv: FRAME FAIL");
      sendError();
    }

    // showing image
    cv::namedWindow( "Patrolling Android View", CV_WINDOW_AUTOSIZE );
    cv::startWindowThread();
    cv::imshow( "Patrolling Android View", imgHSV );

    // look for a face only when we have a color and haven't already detected one
    if (color && !face) faceDetect();

    if (face) personTracking();	// if we have a face we follow the color
    else {			// if not
      message.camera = 's';
      message.errorOpenCV = '0';
      opencvCommands.publish( message );
    }

    key = cv::waitKey(100);
    switch (key) {
            //hue
            case 'q':
                if (hue_low == hue_high)
                    ROS_INFO("hue_low must be less than hue_high");
                else 
                    hue_low = hue_low + 1;
            break;
            case 'a':
                if (hue_low == 0) 
                    ROS_INFO("Hue is minimum");
                else
                    hue_low = hue_low - 1;
            break;
            case 'w':
                if (hue_high == 255)
                    ROS_INFO("Hue is maximum");
                else
                    hue_high = hue_high + 1;
            break;
            case 's':
                if (hue_high == hue_low)
                    ROS_INFO("hue_high must be greater than hue_low");
                else
                    hue_high = hue_high - 1;
            break;
           
            //saturation 
            case 'e':
                if (sat_low == sat_high)
                    ROS_INFO("sat_low must be less than sat_high");
                else 
                    sat_low = sat_low + 1;
            break;
            case 'd':
                if (sat_low == 0) 
                    ROS_INFO("sat is minimum");
                else
                    sat_low = sat_low - 1;
            break;
            case 'r':
                if (sat_high == 255)
                    ROS_INFO("sat is maximum");
                else
                    sat_high = sat_high + 1;
            break;
            case 'f':
                if (sat_high == sat_low)
                    ROS_INFO("sat_high must be greater than sat_low");
                else
                    sat_high = sat_high - 1;
            break;
            
            //value 
            case 't':
                if (val_low == val_high)
                    ROS_INFO("val_low must be less than val_high");
                else 
                    val_low = val_low + 1;
            break;
            case 'g':
                if (val_low == 0) 
                    ROS_INFO("val is minimum");
                else
                    val_low = val_low - 1;
            break;
            case 'y':
                if (val_high == 255)
                    ROS_INFO("val is maximum");
                else
                    val_high = val_high + 1;
            break;
            case 'h':
                if (val_high == val_low)
                    ROS_INFO("val_high must be greater than val_low");
                else
                    val_high = val_high - 1;
            break;
    }
    //ROS_INFO("Frames");
    ROS_INFO("Hue: %d-%d\tSat: %d-%d\tVal: %d-%d\n", hue_low, hue_high, sat_low, sat_high, val_low, val_high);
  }
  ROS_FATAL("COULD NOT CAPTURE FRAME");
  return -1;
}
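
The twelve switch cases above differ only in which HSV bound they move and in which direction. A sketch of one way to collapse them into a single clamped-step helper follows; the adjust() name and the bound arguments are illustrative assumptions, not part of the original node.

#include <cstdio>

// Step *value by delta, refusing to leave the closed range [lo, hi].
static void adjust(int* value, int delta, int lo, int hi, const char* what) {
	int next = *value + delta;
	if (next < lo || next > hi)
		std::printf("%s is at its limit (%d)\n", what, *value);
	else
		*value = next;
}

// Usage with the same globals and key bindings as the node above:
//   case 'q': adjust(&hue_low,  +1, 0, hue_high - 1, "hue_low");   break;
//   case 'a': adjust(&hue_low,  -1, 0, hue_high - 1, "hue_low");   break;
//   case 'w': adjust(&hue_high, +1, hue_low + 1, 255, "hue_high"); break;
//   case 's': adjust(&hue_high, -1, hue_low + 1, 255, "hue_high"); break;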
Code Example #3
File: main.c  Project: mifi/facetracker
int videoFunc() {
	IplImage *frame = NULL; 
	IplImage *image = NULL, *image2 = NULL;
	IplImage *prev_img = NULL;

	CvPoint2D32f facePt1, facePt2;

	int isTrackingFace = FALSE;
	int initOptFlow = FALSE;

	while (!quitFlag) {
		char c;

		// New detected face position
		CvPoint2D32f newFacePt1, newFacePt2;

		int detectedFace = FALSE;

		if (numImgs == 0) {
			frame = cvQueryFrame(capture);
			//cvGrabFrame(capture);
			//img = cvRetrieveFrame(capture);
			if (frame == NULL) {
				fprintf(stderr, "Failed to grab frame\n");
				return -1;
			}
		}
		else {
			frame = cvLoadImage(imgPaths[videoTick % numImgs], CV_LOAD_IMAGE_UNCHANGED);

			if (frame == NULL) {
				fprintf(stderr, "Failed to load input image\n");
				return -1;
			}
		}

		image = cvCloneImage(frame);
		image2 = cvCloneImage(frame);
		if (image == NULL || image2 == NULL) {
			fprintf(stderr, "Failed to clone image\n");
			return -1;
		}

		if (videoTick % 10 == 0 && faceDetect(image, &newFacePt1, &newFacePt2) > -1) {
		//if (videoTick == 0 && faceDetect(image, &newFacePt1, &newFacePt2) > -1) {
			//if (((float)(newFacePos.x - facePos.x) / image->width < FACE_MOVEMENT_THRESHOLD && (float)(newFacePos.y - facePos.y) / image->height < FACE_MOVEMENT_THRESHOLD)
			//	|| !isTrackingFace) {
				detectedFace = TRUE;
				initOptFlow = TRUE;
				facePt1 = newFacePt1;
				facePt2 = newFacePt2;
			//}
		}

		if (!detectedFace && prev_img != NULL) {
			fprintf(stderr, "Failed to detect face, trying to track\n");

			if (opt_flow_find_points(prev_img, image, initOptFlow, &facePt1, &facePt2, &newFacePt1, &newFacePt2, image2) < 0) {
				fprintf(stderr, "Warning: couldn't track any flow points\n");
				isTrackingFace = FALSE;
			}
			else {
				//if ((float)(newFacePos.x - facePos.x) / image->width < FACE_MOVEMENT_THRESHOLD && (float)(newFacePos.y - facePos.y) / image->height < FACE_MOVEMENT_THRESHOLD) {
					isTrackingFace = TRUE;
					facePt1 = newFacePt1;
					facePt2 = newFacePt2;
				/*}
				else {
					isTrackingFace = FALSE;
				}*/
			}

			if (initOptFlow)
				initOptFlow = FALSE;
		}
		else
			isTrackingFace = TRUE;



		// Set the previous-image pointer and free the old previous image
		cvReleaseImage(&prev_img);
		prev_img = image;


		// Draw the rectangle in the input image
		if (isTrackingFace)
			draw_rect(image2, facePt1, facePt2, detectedFace);
		else {
			drawFullscreenRect(image2);
		}

		cvFlip(image2, NULL, 1);
		cvShowImage("preview", image2);

		c = cvWaitKey(10);

		cvReleaseImage(&image2);

		// Frames from cvQueryFrame() are owned by the capture; only release loaded images
		if (numImgs > 0)
			cvReleaseImage(&frame);

		// Quit on ESC or 'q' (after the per-frame images have been released)
		if (c == 27 || c == 'q')
			break;

		videoTick++;
	}

	cvReleaseImage(&prev_img);	// free the last retained frame clone
	return 0;
}
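
The loop above implements a common hybrid: run the (expensive) face detector only every 10th frame and bridge the gaps with sparse optical flow. A compact sketch of the same pattern against the modern C++ API follows; trackLoop() and the goodFeaturesToTrack() seeding are illustrative stand-ins for the project's faceDetect() and opt_flow_find_points(), not its actual code.

#include <opencv2/video/tracking.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <vector>

// Stand-in for the project's detector: seed trackable corners so the sketch runs.
static bool detectFace(const cv::Mat& gray, std::vector<cv::Point2f>& pts) {
	cv::goodFeaturesToTrack(gray, pts, 50, 0.01, 8.0);
	return !pts.empty();
}

void trackLoop(cv::VideoCapture& cap) {
	const int N = 10;	// re-detect every N frames, as in videoFunc()
	cv::Mat frame, gray, prevGray;
	std::vector<cv::Point2f> pts;
	for (int tick = 0; cap.read(frame); ++tick) {
		cv::cvtColor(frame, gray, cv::COLOR_BGR2GRAY);
		if (tick % N == 0 && detectFace(gray, pts)) {
			// A fresh detection replaces the tracked points outright
		} else if (!prevGray.empty() && !pts.empty()) {
			// Bridge between detections with pyramidal Lucas-Kanade flow
			std::vector<cv::Point2f> nextPts;
			std::vector<unsigned char> status;
			std::vector<float> err;
			cv::calcOpticalFlowPyrLK(prevGray, gray, pts, nextPts, status, err);
			pts = nextPts;
		}
		prevGray = gray.clone();	// mirrors the prev_img bookkeeping above
	}
}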
Code Example #4
File: camcv.c  Project: Ensiss/pima-embarque
/**
 *  buffer header callback function for video
 *
 * @param port Pointer to port from which callback originated
 * @param buffer mmal buffer header pointer
 */
static void video_buffer_callback(MMAL_PORT_T *port, MMAL_BUFFER_HEADER_T *buffer)
{
  MMAL_BUFFER_HEADER_T *new_buffer;
  PORT_USERDATA *pData = (PORT_USERDATA *)port->userdata;

  if (pData)
    {

      if (buffer->length)
	{

	  mmal_buffer_header_mem_lock(buffer);

	  //
	  // *** PR : OPEN CV Stuff here !
	  //
	  int w = pData->pstate->width;	// image size (used only by the commented-out display path below)
	  int h = pData->pstate->height;
	  int h4 = h / 4;		// chroma plane height: each I420 chroma plane is h4 rows of w bytes
	  (void)w; (void)h; (void)h4;	// silence unused-variable warnings while that path is disabled

	  faceDetect(buffer->data);

	  /* memcpy(py->imageData,buffer->data,w*h);	// read Y */

	  /* if (pData->pstate->graymode==0) */
	  /*   { */
	  /*     memcpy(pu->imageData,buffer->data+w*h,w*h4); // read U */
	  /*     memcpy(pv->imageData,buffer->data+w*h+w*h4,w*h4); // read v */

	  /*     cvResize(pu, pu_big, CV_INTER_NN); */
	  /*     cvResize(pv, pv_big, CV_INTER_NN);  //CV_INTER_LINEAR looks better but it's slower */
	  /*     cvMerge(py, pu_big, pv_big, NULL, image); */

	  /*     cvCvtColor(image,dstImage,CV_YCrCb2RGB);	// convert in RGB color space (slow) */
	  /*     cvShowImage("camcvWin", dstImage ); */
	  /*   } */
	  /* else */
	  /*   { */
	  /*     cvShowImage("camcvWin", py); // display only gray channel */
	  /*   } */

	  cvWaitKey(1);
	  nCount++;		// count frames displayed

	  mmal_buffer_header_mem_unlock(buffer);
	}
      else vcos_log_error("buffer null");

    }
  else
    {
      vcos_log_error("Received a encoder buffer callback with no state");
    }

  // release buffer back to the pool
  mmal_buffer_header_release(buffer);

  // and send one back to the port (if still open)
  if (port->is_enabled)
    {
      MMAL_STATUS_T status;

      new_buffer = mmal_queue_get(pData->pstate->video_pool->queue);

      if (new_buffer)
	status = mmal_port_send_buffer(port, new_buffer);

      if (!new_buffer || status != MMAL_SUCCESS)
	vcos_log_error("Unable to return a buffer to the encoder port");
    }

}
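
For reference, the commented-out memcpy block in the callback assumes the MMAL port delivers planar I420: a full-resolution Y plane of w*h bytes followed by two quarter-size chroma planes (the w*h4 terms above, with h4 = h/4). A small sketch of that layout follows; the YuvPlanes struct and splitI420() are illustrative names, not part of the project.

#include <cstdint>
#include <cstddef>

struct YuvPlanes {
	const uint8_t *y;	// w   x h    luma
	const uint8_t *u;	// w/2 x h/2  chroma
	const uint8_t *v;	// w/2 x h/2  chroma
};

// Split a packed I420 buffer (the layout assumed by the callback above)
// into its three plane pointers.
static YuvPlanes splitI420(const uint8_t *data, int w, int h)
{
	size_t ySize = (size_t)w * h;	// Y plane: w*h bytes
	size_t cSize = ySize / 4;	// each chroma plane: w*(h/4) bytes
	YuvPlanes p = { data, data + ySize, data + ySize + cSize };
	return p;
}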