Example #1
    int FindFacesRect(const cv::Mat &source, std::vector<cv::Rect> &faces)
    {
        const std::string cascadeName = "d:/Install/Programming/opencv/data/haarcascades/haarcascade_frontalface_alt.xml";
        const double scale = 2;

        cv::CascadeClassifier cascade;
        if( !cascade.load( cascadeName ) )
        {
            throw std::runtime_error("ERROR: Could not load classifier cascade");
        }

        cv::Mat gray;
        cv::cvtColor( source, gray, CV_BGR2GRAY );
        cv::Mat smallImg(cvRound (source.rows/scale), cvRound(source.cols/scale), CV_8UC1 );
        cv::resize( gray, smallImg, smallImg.size(), 0, 0, cv::INTER_LINEAR );
        cv::equalizeHist( smallImg, smallImg );

        cascade.detectMultiScale( smallImg, faces,
            1.1, 2, 0
            //|CV_HAAR_FIND_BIGGEST_OBJECT
            //|CV_HAAR_DO_ROUGH_SEARCH
            |CV_HAAR_SCALE_IMAGE
            ,
            cv::Size(30, 30) );

        for( std::vector<cv::Rect>::iterator r = faces.begin(); r != faces.end(); r++)
        { 
            r->x = cvRound(r->x * scale);
            r->y = cvRound(r->y * scale);
            r->height = cvRound(r->height * scale);
            r->width = cvRound(r->width * scale);
        }

        return static_cast<int>(faces.size());
    }
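FindFacesRect is shown without a caller; a minimal sketch of how it might be used (the input and output file names here are assumptions, not part of the original example):

#include <opencv2/opencv.hpp>
#include <vector>

int main()
{
    cv::Mat image = cv::imread("input.jpg");   // hypothetical input file
    if (image.empty())
        return -1;

    std::vector<cv::Rect> faces;
    FindFacesRect(image, faces);               // rectangles come back in full-resolution coordinates

    for (size_t i = 0; i < faces.size(); i++)
        cv::rectangle(image, faces[i], cv::Scalar(0, 255, 0), 2);

    cv::imwrite("faces_out.jpg", image);       // hypothetical output file
    return 0;
}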
Example #2
int main (int argc, char **argv)
{
    cv::Mat input,mask_img,not_masked;
    //loading haar classifier
    std::string cascadeName = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
    cv::CascadeClassifier cascade;
    if(!cascade.load(cascadeName)){
        printf("ERROR: cascadefile見つからん!\n");
        return -1;
    }
    
    //loading resource file(for face)
    input=cv::imread("/Users/naoto/git/opencv_gl/opencv/minematsu1.png",1);
    if(input.empty()){
        printf("ERROR: image not found!\n");
        return 0;
    }
    
    double scale = 4.0;
    cv::Mat gray, smallImg(cv::saturate_cast<int>(input.rows/scale),cv::saturate_cast<int>(input.cols/scale),CV_8UC1);
    cv::cvtColor(input, gray, CV_BGR2GRAY);
    cv::resize(gray, smallImg, smallImg.size(),0,0,cv::INTER_LINEAR);
    cv::equalizeHist(smallImg, smallImg);//equalize the grayscale histogram to improve contrast
    
    std::vector<cv::Rect> faces;
    cascade.detectMultiScale(smallImg, faces,1.1,2,CV_HAAR_SCALE_IMAGE,cv::Size(20,20));
    
    int i;
    printf("deteced faces:%d\n",(int)faces.size());
    for (i=0; i<faces.size(); i++) {
        cv::Point center,p1,p2;
        int radius;
        //for saturate_cast, see http://opencv.jp/opencv-2svn/cpp/operations_on_arrays.html
        center.x = cv::saturate_cast<int>((faces[i].x + faces[i].width*0.5)*scale);//note that the downscaling factor is undone here!
        center.y = cv::saturate_cast<int>((faces[i].y + faces[i].height*0.5)*scale);
        radius = cv::saturate_cast<int>((faces[i].width + faces[i].height)*0.25*scale);
        p1.x=center.x-radius;p1.y=center.y-radius;
        p2.x=center.x+radius;p2.y=center.y+radius;
        cv::Rect roi_rect(center.x-radius,center.y-radius,radius*2,radius*2);//stored as top-left x, top-left y, width, height
        mask_img.create(input.size(), CV_8UC1);
        mask_img=cv::Scalar(0,0,0);
        not_masked=mask_img(roi_rect);
        not_masked=cv::Scalar(255,255,255);
    }
    
    cv::namedWindow("result",1);
    cv::namedWindow("masked",1);
    cv::imshow("result", input);
    cv::imshow("masked", mask_img);
    cv::waitKey(0);
    return 0;
    
}
Example #3
std::vector<double> faceToVecMat(cv::Mat img) {
    std::vector<double> ret;
    cv::Mat smallImg(96, 96, CV_8UC1);
    cv::Size smallImgSize = smallImg.size();

    cv::resize(img, smallImg, smallImgSize, 0, 0, cv::INTER_LINEAR);
    // Generate 128 element face vector using DNN
    cv::Mat blob = cv::dnn::blobFromImage(smallImg, 1.0 / 255, smallImgSize,
                                          cv::Scalar(), true, false);
    faceRecogNet.setInput(blob);
    cv::Mat vec = faceRecogNet.forward();
    // Return vector
    for (int i = 0; i < vec.rows; ++i)
        ret.insert(ret.end(), vec.ptr<float>(i), vec.ptr<float>(i) + vec.cols);
    return ret;
}
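faceRecogNet above is a cv::dnn::Net assumed to be loaded elsewhere (e.g. an OpenFace-style embedding model). A small, hypothetical helper for comparing two of the returned vectors, e.g. to decide whether two crops show the same person:

#include <cmath>
#include <vector>

// Euclidean distance between two face descriptors returned by faceToVecMat;
// smaller values mean more similar faces (the threshold is model-dependent).
double faceVecDistance(const std::vector<double>& a, const std::vector<double>& b)
{
    double sum = 0.0;
    for (size_t i = 0; i < a.size() && i < b.size(); ++i) {
        double d = a[i] - b[i];
        sum += d * d;
    }
    return std::sqrt(sum);
}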
Example #4
void FaceDetect::detect(cv::Mat& img, fd_cb_t fd_cb) {
  int i = 0;
  double t = 0;
  std::vector<cv::Rect> faces;
  const static cv::Scalar colors[] =  { CV_RGB(0,0,255),
      CV_RGB(0,128,255),
      CV_RGB(0,255,255),
      CV_RGB(0,255,0),
      CV_RGB(255,128,0),
      CV_RGB(255,255,0),
      CV_RGB(255,0,0),
      CV_RGB(255,0,255)} ;
  cv::Mat gray, smallImg( cvRound (img.rows/scale_), cvRound(img.cols/scale_), CV_8UC1 );

  cv::cvtColor( img, gray, CV_BGR2GRAY );
  cv::resize( gray, smallImg, smallImg.size(), 0, 0, cv::INTER_LINEAR );
  equalizeHist( smallImg, smallImg );

  t = (double)cvGetTickCount();
  cascade_.detectMultiScale( smallImg, faces,
          1.1, 2, 0
          //|CV_HAAR_FIND_BIGGEST_OBJECT
          //|CV_HAAR_DO_ROUGH_SEARCH
          |CV_HAAR_SCALE_IMAGE
          ,
          cv::Size(30, 30) );

  fd_cb_t cb = fd_cb ? fd_cb : fd_cb_;
  for( std::vector<cv::Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
  {
    cv::Mat smallImgROI;
    cv::Point center;
    cv::Scalar color = colors[i%8];
    int radius;
    center.x = cvRound((r->x + r->width*0.5)*scale_);
    center.y = cvRound((r->y + r->height*0.5)*scale_);
    radius = cvRound((r->width + r->height)*0.25*scale_);

    if(cb) {
      cb(center.x, center.y, radius);
    }
  }
}
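The FaceDetect class itself is not shown; a minimal declaration consistent with the members used above (scale_, cascade_, fd_cb_) is sketched below. This is an assumption, not the project's actual header.

#include <functional>
#include <string>
#include <opencv2/opencv.hpp>

// Callback invoked once per detected face with its center and radius
// in full-resolution image coordinates (assumed type).
typedef std::function<void(int x, int y, int radius)> fd_cb_t;

class FaceDetect {
public:
  FaceDetect(const std::string& cascadePath, double scale, fd_cb_t cb = fd_cb_t())
      : scale_(scale), fd_cb_(cb) { cascade_.load(cascadePath); }

  void detect(cv::Mat& img, fd_cb_t fd_cb);   // defined above

private:
  double scale_;
  cv::CascadeClassifier cascade_;
  fd_cb_t fd_cb_;
};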
Example #5
int main (int argc, char **argv)
{
    std::string cascadeName = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
    cv::CascadeClassifier cascade;
    if(!cascade.load(cascadeName)){
        printf("ERROR: cascadefile見つからん!\n");
        return -1;
    }
    
    cv::Mat frame;
    cv::VideoCapture cap;
    cap.open(0);
    cap >> frame;
    
    cv::namedWindow("result",1);
    cv::createTrackbar("size", "result", &size_of_mosaic, 30,0);
    
    double scale = 4.0;
    cv::Mat gray, smallImg(cv::saturate_cast<int>(frame.rows/scale),cv::saturate_cast<int>(frame.cols/scale),CV_8UC1);
    
    for(;;){
        cap >> frame;
        cv::cvtColor(frame, gray, CV_BGR2GRAY);
        cv::resize(gray, smallImg, smallImg.size(),0,0,cv::INTER_LINEAR);
        cv::equalizeHist(smallImg, smallImg);
        
        std::vector<cv::Rect> faces;
        cascade.detectMultiScale(smallImg, faces,1.1,3,CV_HAAR_SCALE_IMAGE,cv::Size(20,20));
        
        int i;
        for (i=0; i<faces.size(); i++) {
            
            #if FLAG //use built-in function

            cv::Point center;
            int radius;
            center.x = cv::saturate_cast<int>((faces[i].x + faces[i].width*0.5)*scale);
            center.y = cv::saturate_cast<int>((faces[i].y + faces[i].height*0.5)*scale);
            radius = cv::saturate_cast<int>((faces[i].width + faces[i].height)*0.25*scale);
            if(size_of_mosaic < 1)size_of_mosaic=1;
            cv::Rect roi_rect(center.x-radius,center.y-radius,radius*2,radius*2);
            cv::Mat mosaic = frame(roi_rect);
            cv::Mat tmp = frame(roi_rect);
            cv::resize(mosaic, tmp, cv::Size(radius/size_of_mosaic,radius/size_of_mosaic),0,0);
            cv::resize(tmp, mosaic, cv::Size(radius*2,radius*2),0,0,CV_INTER_NN);
            
            #else
            //draw a bar across the eye region
            cv::Point center;
            int radius;
            double eye_ratio=0.2;//fraction of the radius from the face center at which to draw the bar
            center.x = cv::saturate_cast<int>((faces[i].x + faces[i].width*0.5)*scale);
            center.y = cv::saturate_cast<int>((faces[i].y + faces[i].height*0.5)*scale);
            radius = cv::saturate_cast<int>((faces[i].width + faces[i].height)*0.25*scale);
            cv::line(frame,cv::Point(center.x-radius,center.y-radius*eye_ratio),cv::Point(center.x-radius+radius*2,center.y-radius*eye_ratio) ,cv::Scalar(0),80,8,0);
            
            #endif
        }
        
        cv::imshow("result", frame);
        
        int key = cv::waitKey(10);
        if(key == 'q' || key == 'Q')
            break;
        
        
    }
    
    return 0;
   
}
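FLAG and size_of_mosaic are globals defined elsewhere in this program; minimal definitions consistent with how they are used above (assumptions, not the original code) would be:

#define FLAG 1            // 1: pixelate the detected face, 0: draw a black bar over the eyes
int size_of_mosaic = 10;  // block-size divisor, adjusted through the "size" trackbar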
Example #6
void MyFaceDetect::detectFace( IplImage * imgsrc, CascadeClassifier& cascade, CascadeClassifier& nestedCascade, double scale, bool tryflip )
{
	Mat img(imgsrc,0);
	int i = 0;
	double t = 0;
	vector<Rect> faces, faces2;
	const static Scalar colors[] =  { CV_RGB(0,0,255),
		CV_RGB(0,128,255),
		CV_RGB(0,255,255),
		CV_RGB(0,255,0),
		CV_RGB(255,128,0),
		CV_RGB(255,255,0),
		CV_RGB(255,0,0),
		CV_RGB(255,0,255)} ;
	Mat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );

	cvtColor( img, gray, CV_BGR2GRAY );
	resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
	equalizeHist( smallImg, smallImg );

	t = (double)cvGetTickCount();
	cascade.detectMultiScale( smallImg, faces,
		1.1, 2, 0
		//|CV_HAAR_FIND_BIGGEST_OBJECT
		//|CV_HAAR_DO_ROUGH_SEARCH
		|CV_HAAR_SCALE_IMAGE
		,
		Size(30, 30) );
	if( tryflip )
	{
		flip(smallImg, smallImg, 1);
		cascade.detectMultiScale( smallImg, faces2,
			1.1, 2, 0
			//|CV_HAAR_FIND_BIGGEST_OBJECT
			//|CV_HAAR_DO_ROUGH_SEARCH
			|CV_HAAR_SCALE_IMAGE
			,
			Size(30, 30) );
		for( vector<Rect>::const_iterator r = faces2.begin(); r != faces2.end(); r++ )
		{
			faces.push_back(Rect(smallImg.cols - r->x - r->width, r->y, r->width, r->height));
		}
	}
	t = (double)cvGetTickCount() - t;
	printf( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );
	for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
	{
		Mat smallImgROI;
		vector<Rect> nestedObjects;
		Point center;
		Scalar color = colors[i%8];
		int radius;

		double aspect_ratio = (double)r->width/r->height;
		if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
		{
			center.x = cvRound((r->x + r->width*0.5)*scale);
			center.y = cvRound((r->y + r->height*0.5)*scale);
			radius = cvRound((r->width + r->height)*0.25*scale);
			circle( img, center, radius, color, 3, 8, 0 );
		}
		else
			rectangle( img, cvPoint(cvRound(r->x*scale), cvRound(r->y*scale)),
			cvPoint(cvRound((r->x + r->width-1)*scale), cvRound((r->y + r->height-1)*scale)),
			color, 3, 8, 0);
		if( nestedCascade.empty() )
			continue;
		smallImgROI = smallImg(*r);
		nestedCascade.detectMultiScale( smallImgROI, nestedObjects,
			1.1, 2, 0
			//|CV_HAAR_FIND_BIGGEST_OBJECT
			//|CV_HAAR_DO_ROUGH_SEARCH
			//|CV_HAAR_DO_CANNY_PRUNING
			|CV_HAAR_SCALE_IMAGE
			,
			Size(30, 30) );
		for( vector<Rect>::const_iterator nr = nestedObjects.begin(); nr != nestedObjects.end(); nr++ )
		{
			center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
			center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
			radius = cvRound((nr->width + nr->height)*0.25*scale);
			circle( img, center, radius, color, 3, 8, 0 );
		}
	}
	cv::imshow( "result", img );
}
Example #7
// usage: poisson.exe [source image (color)] [destination image (color)] [mask image (gray)] [output image (color)] [offset y] [offset x]
int main(int argc, char** argv){
    int i;
    int offset[2];
    IplImage *im_src = NULL, *im_dst = NULL, *im_mask = NULL;
    
    if(argc != 7){
        fprintf(stderr,"usage: poisson.exe [source image (color)] [destination image (color)] [mask image (gray)] [outout image (color)] [offset y] [offset x]\n");
        exit(0);
    }
//////loading source image(cv::Mat input) & creating mask(cv::Mat mask_img)
    cv::Mat input,mask_img,not_masked;
    
    //loading haar classifier
    std::string cascadeName = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
    cv::CascadeClassifier cascade;
    if(!cascade.load(cascadeName)){
        printf("ERROR: cascadefile見つからん!\n");
        return -1;
    }
    
    //loading resource file(for face)
    input=cv::imread(argv[1],1);
    if(input.empty()){
        printf("ERROR: image not found!\n");
        return 0;
    }
    
    double scale = 4.0;
    cv::Mat gray, smallImg(cv::saturate_cast<int>(input.rows/scale),cv::saturate_cast<int>(input.cols/scale),CV_8UC1);
    cv::cvtColor(input, gray, CV_BGR2GRAY);
    cv::resize(gray, smallImg, smallImg.size(),0,0,cv::INTER_LINEAR);
    cv::equalizeHist(smallImg, smallImg);//equalize the grayscale histogram to improve contrast
    
    std::vector<cv::Rect> faces;
    cascade.detectMultiScale(smallImg, faces,1.1,2,CV_HAAR_SCALE_IMAGE,cv::Size(20,20));
    
    printf("detected faces:%d\n",(int)faces.size());
    for (i=0; i<faces.size(); i++) {
        cv::Point center;
        int radius;
        //for saturate_cast, see http://opencv.jp/opencv-2svn/cpp/operations_on_arrays.html
        center.x = cv::saturate_cast<int>((faces[i].x + faces[i].width*0.5)*scale);//note that the downscaling factor is undone here!
        center.y = cv::saturate_cast<int>((faces[i].y + faces[i].height*0.5)*scale);
        radius = cv::saturate_cast<int>((faces[i].width + faces[i].height)*0.25*scale);
        cv::Point p1,p2;
        p1.x=center.x-radius;p1.y=center.y-radius;
        p2.x=center.x+radius;p2.y=center.y+radius;
        cv::Rect roi_rect(center.x-radius,center.y-radius,radius*2,radius*2);//stored as top-left x, top-left y, width, height
        mask_img.create(input.size(), CV_8UC1);
        mask_img=cv::Scalar(0,0,0);
        not_masked=mask_img(roi_rect);
        not_masked=cv::Scalar(255,255,255);
    }
    // for debug(checking masks)
//    cv::namedWindow("result",1);
//    cv::namedWindow("masked",1);
//    cv::imshow("result", input);
//    cv::imshow("masked", mask_img);
//    cv::waitKey(0);
    
    //loading destination image() & find position and size
    //resizing source image & calc offset
    if( (im_src = cvLoadImage( argv[1], CV_LOAD_IMAGE_COLOR)) == 0 ){
        fprintf(stderr,"No such file %s", argv[1]);
        exit(0);
    }
    if( (im_dst = cvLoadImage( argv[2], CV_LOAD_IMAGE_COLOR)) == 0 ){
        fprintf(stderr,"No such file %s", argv[2]);
        exit(0);
    }
    if( (im_mask = cvLoadImage( argv[3], CV_LOAD_IMAGE_GRAYSCALE)) == 0 ){
        fprintf(stderr,"No such file %s", argv[3]);
        exit(0);
    }
    offset[0]=atoi(argv[5]);
    offset[1]=atoi(argv[6]);
    
    for(i=0;i<3;i++){// i: channels
        quasi_poisson_solver(im_src, im_dst, im_mask, i, offset);
        //poisson_solver(im_src, im_dst, im_mask, i, offset);
    }
    cvSaveImage(argv[4],im_dst);
    cvReleaseImage(&im_src);
    cvReleaseImage(&im_dst);
    cvReleaseImage(&im_mask);
    return 0;
}
Example #8
double CDetectionAlgs::BoostingDetection( vector<Rect>& objs,
        const CascadeClassifier& cascade,
        const Mat& img,
        const Rect* confinedArea,
        const double scale,
        Size sSize,
        Size bSize)
{
    double res = (double)cvGetTickCount();
    objs.clear();

    Mat confinedImg;
    if(confinedArea)
        confinedImg = img(*confinedArea);
    else
        confinedImg = img;

    Mat gray, smallImg( cvRound (confinedImg.rows/scale),
                        cvRound(confinedImg.cols/scale),
                        CV_8UC1 );

    if(confinedImg.channels() == 3)
        cvtColor( confinedImg, gray, CV_BGR2GRAY );
    else
        gray = confinedImg;
    resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
    equalizeHist( smallImg, smallImg );

    /////////////////detection/////////////////////////////////////////
    //t = (double)cvGetTickCount();
    const_cast<CascadeClassifier&>(cascade).detectMultiScale(
        smallImg,
        objs,
        1.1,
        2,
        0,
        //|CascadeClassifier::DO_CANNY_PRUNING
        //|CascadeClassifier::FIND_BIGGEST_OBJECT
        //|CascadeClassifier::DO_ROUGH_SEARCH
        //|CascadeClassifier::SCALE_IMAGE,
        sSize,
        bSize);

    ///////////////////////sort///////////////////////////////////////
    if (objs.size() > 0)
    {
        qsort( (void *)&(objs[0]), objs.size(), sizeof(Rect), cvSizeCompare );
        // re-position
        if (confinedArea)
        {
            for (int i = 0; i < objs.size(); ++i)
            {
                objs[i].x = objs[i].x*scale+confinedArea->x;
                objs[i].y = objs[i].y*scale+confinedArea->y;
            }
        }
        else
        {
            for (int i = 0; i < objs.size(); ++i)
            {
                objs[i].x = objs[i].x*scale;
                objs[i].y = objs[i].y*scale;
            }
        }

        //scale back
        for ( int i = 0; i < objs.size(); ++i)
        {
            objs[i].width *= scale;
            objs[i].height *= scale;
        }
    }

    res = ((double)cvGetTickCount() - res)
          / ((double)cvGetTickFrequency()*1000.);
    return res;
}
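cvSizeCompare is defined elsewhere in that codebase; a plausible qsort comparator (purely an assumption) that orders the detected rectangles by area, largest first, could look like this:

#include <opencv2/core/core.hpp>

static int cvSizeCompare(const void* a, const void* b)
{
    const cv::Rect* ra = static_cast<const cv::Rect*>(a);
    const cv::Rect* rb = static_cast<const cv::Rect*>(b);
    // Descending by area so the most prominent detection comes first.
    return rb->width * rb->height - ra->width * ra->height;
}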
Example #9
int main(int argc, char *argv[])
{
  // 1. load classifier
  std::string cascadeName = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"; //Haar-like
  cv::CascadeClassifier cascade;
  if(!cascade.load(cascadeName)){
    printf("ERROR: cascadeFile not found\n");
    return -1;
  }
  
  // 2. initialize VideoCapture
  cv::Mat frame;
  cv::VideoCapture cap;
  cap.open(0);
  cap >> frame;
  
  // 3. prepare window and trackbar
  cv::namedWindow("result", 1);
  cv::createTrackbar("size", "result", &size_of_mosaic, 30, 0);

  double scale = 4.0;
  cv::Mat gray, smallImg(cv::saturate_cast<int>(frame.rows/scale),
               cv::saturate_cast<int>(frame.cols/scale), CV_8UC1);

  for(;;){
    
    // 4. capture frame
    cap >> frame;
    //convert to gray scale
    cv::cvtColor( frame, gray, CV_BGR2GRAY );
    
    // 5. scale-down the image
    cv::resize(gray, smallImg, smallImg.size(), 0, 0, cv::INTER_LINEAR);
    cv::equalizeHist(smallImg, smallImg);
    
    // 6. detect face using Haar-classifier
    std::vector<cv::Rect> faces;
    ///multi-scale face searching
    // image, size, scale, num, flag, smallest rect
    cascade.detectMultiScale(smallImg, faces,
      1.1,
      2,//increasing this argument speeds up detection
      CV_HAAR_SCALE_IMAGE,
      cv::Size(30,30));

    // 7. mosaic(pixelate) face-region
    //std::vector<cv::Rect>::const_iterator r = faces.begin();
    //for(; r != faces.end(); ++r) {
    int i;
    for(i=0;i<faces.size();++i){
      cv::Point center;
      int radius;
      center.x = cv::saturate_cast<int>((faces[i].x + faces[i].width*0.5)*scale);
      center.y = cv::saturate_cast<int>((faces[i].y + faces[i].height*0.5)*scale);
      radius = cv::saturate_cast<int>((faces[i].width + faces[i].height)*0.25*scale);
      //mosaic
      if(size_of_mosaic < 1) size_of_mosaic = 1;
      cv::Rect roi_rect(center.x-radius,center.y-radius,radius*2,radius*2);
      cv::Mat mosaic = frame(roi_rect);
      cv::Mat tmp;
      cv::resize(mosaic,tmp,cv::Size(radius / size_of_mosaic, radius / size_of_mosaic),0,0);
      cv::resize(tmp,mosaic, cv::Size(radius*2, radius*2),0,0,CV_INTER_NN);
    }
    
    // 8. show mosaiced image to window
    cv::imshow("result", frame );

    int key = cv::waitKey(10);
    if(key == 'q' || key == 'Q')
        break;

  }
 return 0;
}
Example #10
// usage: poisson.exe [source image (color)] [destination image (color)] [output image (color)]
int main(int argc, char** argv){
    int offset[2];
    IplImage *im_src =NULL, *im_dst = NULL, *im_mask = NULL;
    cv::Mat input,dst_img,mask_img,not_masked,mask_img_gray;
    
    if(argc != 4){
        fprintf(stderr,"usage: poisson.exe [source image (color)] [destination image (color)] [outout image (color)] [offset y] [offset x]\n");
        exit(0);
    }
    //////loading source image(cv::Mat input) & creating mask(cv::Mat mask_img)
    //loading haar classifier
    std::string cascadeName = "/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml";
    cv::CascadeClassifier cascade;
    if(!cascade.load(cascadeName)){
        printf("ERROR: cascadefile見つからん!\n");
        return -1;
    }
    //////loading destination image() & find position and size
    //loading destination file(for face)
    dst_img=cv::imread(argv[2],1);
    if(dst_img.empty()){
        printf("ERROR:distination image not found!\n");
        return 0;
    }
    //preprocessing
    double scale = 4.0;
    cv::Mat gray1, smallImg1(cv::saturate_cast<int>(dst_img.rows/scale),cv::saturate_cast<int>(dst_img.cols/scale),CV_8UC1);
    cv::cvtColor(dst_img, gray1, CV_BGR2GRAY);
    cv::resize(gray1, smallImg1, smallImg1.size(),0,0,cv::INTER_LINEAR);
    cv::equalizeHist(smallImg1, smallImg1);//equalize the grayscale histogram to improve contrast
    std::vector<cv::Rect> faces,faces1,faces2;
    cv::Point center2,center1;
    int radius,radius1,radius2;
    cv::Point p1,p2;
    //find face size
    cascade.detectMultiScale(smallImg1, faces1,1.1,2,CV_HAAR_SCALE_IMAGE,cv::Size(20,20));
    printf("deteced faces:%d\n",(int)faces1.size());
    for (int i=0; i<faces1.size(); i++) {
        //for saturate_cast, see http://opencv.jp/opencv-2svn/cpp/operations_on_arrays.html
        center1.x = cv::saturate_cast<int>((faces1[i].x + faces1[i].width*0.5)*scale);//note that the downscaling factor is undone here!
        center1.y = cv::saturate_cast<int>((faces1[i].y + faces1[i].height*0.5)*scale);
        radius1 = cv::saturate_cast<int>((faces1[i].width + faces1[i].height)*0.25*scale);
    }
    
    //loading resource file(for face)
    input=cv::imread(argv[1],1);
    if(input.empty()){
        printf("ERROR:resource image not found!\n");
        return 0;
    }
    //preprocessing
    cv::Mat gray, smallImg(cv::saturate_cast<int>(input.rows/scale),cv::saturate_cast<int>(input.cols/scale),CV_8UC1);
    cv::cvtColor(input, gray, CV_BGR2GRAY);
    cv::resize(gray, smallImg, smallImg.size(),0,0,cv::INTER_LINEAR);
    cv::equalizeHist(smallImg, smallImg);//equalize the grayscale histogram to improve contrast
    //find face size and generating mask
    cascade.detectMultiScale(smallImg, faces2,1.1,2,CV_HAAR_SCALE_IMAGE,cv::Size(20,20));
    printf("deteced faces:%d\n",(int)faces2.size());
    for (int i=0; i<1; i++) {
        //for saturate_cast, see http://opencv.jp/opencv-2svn/cpp/operations_on_arrays.html
        center2.x = cv::saturate_cast<int>((faces2[i].x + faces2[i].width*0.5)*scale);//note that the downscaling factor is undone here!
        center2.y = cv::saturate_cast<int>((faces2[i].y + faces2[i].height*0.5)*scale);
        radius2 = cv::saturate_cast<int>((faces2[i].width + faces2[i].height)*0.25*scale);
        p1.x=center2.x-radius2;p1.y=center2.y-radius2;
        p2.x=center2.x+radius2;p2.y=center2.y+radius2;
        cv::Rect roi_rect(center2.x-radius2,center2.y-radius2,radius2*2,radius2*2);//stored as top-left x, top-left y, width, height
        mask_img.create(input.size(), CV_8UC3);
        mask_img=cv::Scalar(0,0,0);//fill the whole mask with black
        not_masked=mask_img(roi_rect);
        not_masked=cv::Scalar(255,255,255);//fill the face ROI with white
        cvtColor(mask_img,mask_img_gray,CV_RGB2GRAY);
        input(roi_rect).copyTo(not_masked);
    }
    double ratio=(double)radius2/(double)radius1;
    int difx=center1.x*ratio-center2.x;
    int dify=center1.y*ratio-center2.y;
    printf("%f %d %d\n",ratio,difx,dify);
    cv::Mat expanded_output(cv::saturate_cast<int>(dst_img.rows*ratio),cv::saturate_cast<int>(dst_img.cols*ratio),CV_8UC1);
    cv::resize(dst_img,expanded_output,expanded_output.size(),0,0,cv::INTER_LINEAR);
    //resizing source image & calc offset
    
    //    //for debug(checking masks)
    //        cv::namedWindow("input",1);
    //        cv::namedWindow("result",1);
    //        cv::namedWindow("masked",1);
    //        cv::imshow("input", input);
    //        cv::imshow("result", expanded_output);
    //        cv::imshow("masked", mask_img);
    //ref:http://bicycle.life.coocan.jp/takamints/index.php/doc/opencv/doc/Mat_conversion//
    //http://d.hatena.ne.jp/kamekamekame877/20110621
    IplImage buf1=input;//apparently a special conversion (copy) constructor from Mat to IplImage is invoked here
    IplImage buf2=mask_img_gray;//same as above
    IplImage buf3=expanded_output;
    im_src=&buf1;
    im_mask=&buf2;
    im_dst=&buf3;
    
    offset[0]=dify;
    offset[1]=difx;
    
    for(int i=0;i<3;i++){// i: channels
        quasi_poisson_solver(im_src, im_dst, im_mask, i, offset);
        //poisson_solver(im_src, im_dst, im_mask, i, offset);
    }
    cvSaveImage(argv[3],im_dst);
    //    cvReleaseImage(&im_src);
    //    cvReleaseImage(&im_dst);
    //    cvReleaseImage(&im_mask);
    return 0;
}
Example #11
void MatchGesture::detectAndDraw(Mat &img, CascadeClassifier &cascade, CascadeClassifier &nestedCascade, double scale)
{
    //cout<<"A Detected"<<endl;
    int i = 0;
    double t = 0;
    vector<Rect> faces;
    const static Scalar colors[] =  { CV_RGB(0,0,255),
                                      CV_RGB(0,128,255),
                                      CV_RGB(0,255,255),
                                      CV_RGB(0,255,0),
                                      CV_RGB(255,128,0),
                                      CV_RGB(255,255,0),
                                      CV_RGB(255,0,0),
                                      CV_RGB(255,0,255)
                                    };

    Mat gray, smallImg( cvRound (img.rows/scale), cvRound(img.cols/scale), CV_8UC1 );

    cvtColor( img, gray, CV_BGR2GRAY );
    cv::resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
    equalizeHist( smallImg, smallImg );

    t = (double)cvGetTickCount();
    cascade.detectMultiScale( smallImg, faces,
                              1.1, 2, 0
                              //|CV_HAAR_FIND_BIGGEST_OBJECT
                              //|CV_HAAR_DO_ROUGH_SEARCH
                              |CV_HAAR_SCALE_IMAGE
                              ,
                              Size(30, 30) );
    t = (double)cvGetTickCount() - t;
    // printf( "detection time = %g ms\n", t/((double)cvGetTickFrequency()*1000.) );
    for( vector<Rect>::const_iterator r = faces.begin(); r != faces.end(); r++, i++ )
    {
        Mat smallImgROI;
        vector<Rect> nestedObjects;
        nestedObjects.clear();
        Point center;
        center.x = 0;
        center.y = 0;
        Scalar color = colors[i%8];
        int radius;
        center.x = cvRound((r->x + r->width*0.5)*scale);
        center.y = cvRound((r->y + r->height*0.5)*scale);
        radius = cvRound((r->width + r->height)*0.25*scale);
        if(center.x != 0 || center.y != 0)
        {
            matchedCharacter = 'A';
            int tempIndex = (int)matchedCharacter-48;
            charactersOccurences[tempIndex]=charactersOccurences[tempIndex]+1;
            if(maxTillNow<charactersOccurences[tempIndex])
            {
                maxTillNow = charactersOccurences[tempIndex];
                maxIndexTillNow = tempIndex;
                std::cout<<"Max Index Till now - "<<maxIndexTillNow<<std::endl;
            }

             //cout<<"A Detected"<<endl;
             // system("espeak < speak.txt");
             //emit sendForDisplay('A');
       }

        circle( img, center, radius, color, 3, 8, 0 );
        if( nestedCascade.empty() )
            continue;

        std::cout<<"First circle command"<<std::endl;
        smallImgROI = smallImg(*r);
        nestedCascade.detectMultiScale( smallImgROI, nestedObjects,
                                        1.1, 2, 0
                                        //|CV_HAAR_FIND_BIGGEST_OBJECT
                                        //|CV_HAAR_DO_ROUGH_SEARCH
                                        //|CV_HAAR_DO_CANNY_PRUNING
                                        |CV_HAAR_SCALE_IMAGE
                                        ,
                                        Size(30, 30) );
        for( vector<Rect>::const_iterator nr = nestedObjects.begin(); nr != nestedObjects.end(); nr++ )
        {
            center.x = cvRound((r->x + nr->x + nr->width*0.5)*scale);
            center.y = cvRound((r->y + nr->y + nr->height*0.5)*scale);
            radius = cvRound((nr->width + nr->height)*0.25*scale);
            circle( img, center, radius, color, 3, 8, 0 );
            std::cout<<"Enter circle drawing - "<<center.x<<"\t"<<center.y<<std::endl;
        }
    }



}
Example #12
// Detect faces in a photo
std::vector<FaceRect> detectFaces(cv::String inputName, cv::String cascadeName, double scale, bool infer = false) {
    cv::CascadeClassifier cascade;
    if (!cascade.load(cascadeName)) {
        std::cout << "error;Could not load classifier cascade. Filename: \"" << cascadeName << "\"" << std::endl;
    }

    if (inputName.empty()) {
        std::cout << "error;You must specify the file to process." << std::endl;
    }

    cv::Mat img = cv::imread(inputName, 1);
    if (img.empty()) {
        std::cout << "error;Could not load the file to process. Filename: \"" << inputName << "\"" << std::endl;
    }

    std::vector<cv::Rect> faces;
    cv::Size smallImgSize;
    static bool disableDnn;

#ifdef HAS_OPENCV_DNN
    disableDnn = faceDetectNet.empty();
#else
    disableDnn = true;
#endif
    if (disableDnn) {
        // Classical face detection
        cv::Mat gray;
        cvtColor(img, gray, CV_BGR2GRAY);

        cv::Mat smallImg(cvRound(img.rows / scale), cvRound(img.cols / scale), CV_8UC1);
        smallImgSize = smallImg.size();

        cv::resize(gray, smallImg, smallImgSize, 0, 0, cv::INTER_LINEAR);
        cv::equalizeHist(smallImg, smallImg);

        cascade.detectMultiScale(smallImg, faces, 1.1, 2, CV_HAAR_SCALE_IMAGE, cv::Size(30, 30));
    } else {
#ifdef HAS_OPENCV_DNN
        // DNN based face detection
        faces = detectFacesMat(img);
        smallImgSize = img.size(); // Not using the small image here
#endif
    }

    std::vector<FaceRect> scaled;
    for (std::vector<cv::Rect>::const_iterator r = faces.begin(); r != faces.end(); r++) {
        FaceRect i;
        i.x = (float) r->x / smallImgSize.width;
        i.y = (float) r->y / smallImgSize.height;
        i.width = (float) r->width / smallImgSize.width;
        i.height = (float) r->height / smallImgSize.height;
#ifdef HAS_OPENCV_DNN
        if (infer && !faceRecogNet.empty()) {
            // Get colour image for vector generation
            cv::Mat colourImg;
            cv::resize(img, colourImg, smallImgSize, 0, 0, cv::INTER_LINEAR);
            i.vec = faceToVecMat(colourImg(*r)); // Run vector conversion on the face
        } else {
            i.vec.assign(128, 0);
        }
#else
        i.vec.assign(128, 0);
#endif
        scaled.push_back(i);
    }

    return scaled;
}
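FaceRect is declared elsewhere in this project; a minimal definition consistent with the fields used above (an assumption, not the actual header) would be:

#include <vector>

struct FaceRect {
    float x, y, width, height;   // face rectangle, relative to the detection image size
    std::vector<double> vec;     // 128-element face descriptor, or zeros when inference is disabled
};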
int main(int argc, char *argv[])
{
  // 1. load classifier
  std::string cascadeeyeName = /*"Nariz.xml";*/"/usr/local/share/OpenCV/haarcascades/haarcascade_eye.xml";/*"frontalEyes35x16.xml";*//*"/usr/local/share/OpenCV/haarcascades/haarcascade_frontalface_alt.xml"; *///Haar-like
  std::string cascademouthName = "/usr/local/share/OpenCV/haarcascades/haarcascade_mcs_mouth.xml";
  cv::CascadeClassifier cascadeeye, cascademouth;
  if(!cascadeeye.load(cascadeeyeName)){
    printf("ERROR: cascadeeyeFile not found\n");
    return -1;
  }

  if(!cascademouth.load(cascademouthName)){
    printf("ERROR: cascademouthFile not found\n");
    return -1;
  }

  cv::Mat doraemon = cv::imread(doraemon_file);
  cv::Mat doraemon_resized;
  
  // 2. initialize image
  char *input_file;
  // 1. prepare Mat objects for input-image and output-image
  cv::Mat input;

  if(argc == 2){
    input_file = argv[1];
  }
  else{
    input_file = preset_file;
  }

  // 2. read an image from the specified file
  input = cv::imread(input_file,1);
  if(input.empty()){
    fprintf(stderr, "cannot open %s\n", input_file);
    exit(0);
  }
  
  // 3. prepare window and trackbar
  cv::namedWindow("result", 1);
  //cv::namedWindow("mosaic", 1);
  cv::createTrackbar("size", "result", &size_of_mosaic, 30, 0);

  double scale = 4.0;
  cv::Mat gray, smallImg(cv::saturate_cast<int>(input.rows/scale),
			 cv::saturate_cast<int>(input.cols/scale), CV_8UC1);

  //convert to gray scale
  cv::cvtColor( input, gray, CV_BGR2GRAY );

  // 5. scale-down the image
  cv::resize(gray, smallImg, smallImg.size(), 0, 0, cv::INTER_LINEAR);
  cv::equalizeHist(smallImg, smallImg);

  // 6. detect face using Haar-classifier
  std::vector<cv::Rect> faces;
  ///multi-scale face searching
  // image, size, scale, num, flag, smallest rect
  cascadeeye.detectMultiScale(smallImg, faces,
                              1.1,
                              4,//increasing this argument speeds up detection
                              CV_HAAR_SCALE_IMAGE);/*,
                              cv::Size(30,30)*/

  // 7. mosaic(pixelate) face-region
  //std::vector<cv::Rect>::const_iterator r = faces.begin();
  int i;
  for(i=0;i<faces.size();++i){
    cv::Point center;
    int radius;
    double radiusx, radiusy;
    center.x = cv::saturate_cast<int>((faces[i].x + faces[i].width*0.5)*scale);
    center.y = cv::saturate_cast<int>((faces[i].y + faces[i].height*0.5)*scale);
    radius = cv::saturate_cast<int>((faces[i].width + faces[i].height)*0.25*scale);

    //normalize by the overlay image's aspect ratio
    if(doraemon.size().width > doraemon.size().height) {
      radiusx = (double)radius;
      radiusy = (double)radius * doraemon.size().height / doraemon.size().width;
    } else {
      radiusy = (double)radius;
      radiusx = (double)radius * doraemon.size().width / doraemon.size().height;
    }

    //rakutencardman

    cv::Rect roi_rect(center.x-radiusx,center.y-radiusy, radiusx*2, radiusy*2);
    cv::Mat mosaic = input(roi_rect);//crop the face region
    cv::resize(doraemon, doraemon_resized, mosaic.size());
    mosaic = cv::Scalar(0,0,0);// doraemon_resized;
    cv::add(doraemon_resized, mosaic, mosaic);
    //cv::imshow("mosaic", mosaic);
  }

  printf("%d", (int)faces.size());

  // 8. show mosaiced image to window
  cv::imshow("result", input );

  while(1){
    int c = cv::waitKey();
    
    // 7. process according to input
    switch(c){
    
    case 27://ESC
    case 'q':
      break;

    case 10://ENTER
      cv::imwrite("rakuten_cardman.jpg", input);
      break;
    }
    break;
  }
  return 0;
}
Mat detect_Face_and_eyes( Mat& img, double scale, QVector <face> &find_faces)
{
    vector<Rect> faces;
    const static Scalar colors[] =
    {
        Scalar(255,0,0),
        Scalar(255,128,0),
        Scalar(255,255,0),
        Scalar(0,255,0),
        Scalar(0,128,255),
        Scalar(0,255,255),
        Scalar(0,0,255),
        Scalar(255,0,255)
    };
    Mat gray, smallImg;
    cvtColor( img, gray, COLOR_BGR2GRAY);
    double fx = 1 / scale;
    resize( gray, smallImg, Size(), fx, fx, INTER_LINEAR );
    equalizeHist( smallImg, smallImg );
    obj.cascade.detectMultiScale( smallImg, faces,
                              1.1, 2, 0
                              |CASCADE_SCALE_IMAGE,
                              Size(30, 30) );
    for ( size_t i = 0; i < faces.size(); i++ )
    {
        Scalar color = colors[i%8];
        int radius;

        Rect r = faces[i];
        Mat smallImgROI;
        vector<Rect> nestedObjects;
        Point center;

        face temp;
        find_faces.push_back(temp);

        double aspect_ratio = (double)r.width/r.height;
        if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
        {
            center.x = cvRound((r.x + r.width*0.5)*scale);
            center.y = cvRound((r.y + r.height*0.5)*scale);
        }
        smallImgROI = smallImg( r );
        obj.nestedCascade.detectMultiScale(smallImgROI, nestedObjects,
                                       1.1, 2, 0
                                       |CASCADE_SCALE_IMAGE,
                                       Size(30, 30) );
        find_faces[i].set_coord_face(center);
        QVector <Point> write_eyes_array;
        QVector <int> write_radius_eyes_array;
        for ( size_t j = 0; j < nestedObjects.size(); j++ )
        {
            Rect nr = nestedObjects[j];
            center.x = cvRound((r.x + nr.x + nr.width*0.5)*scale);
            center.y = cvRound((r.y + nr.y + nr.height*0.5)*scale);

            radius = cvRound((nr.width + nr.height)*0.25*scale);
            if((radius>=20)&&((center.x>10)&&(center.x<img.size().width-10))&&((center.y>10)&&(center.y<img.size().height-10)))
            {
                write_radius_eyes_array.push_back(radius);
                write_eyes_array.push_back(center);
                circle(img, center, radius, color, 3, 8, 0 );
            }
        }
        find_faces[i].set_coord_eyes(write_eyes_array);
        find_faces[i].set_radius_eyes(write_radius_eyes_array);
    }
    return img;
}
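The face class and the global obj holding the two classifiers are not shown in this last example; minimal declarations consistent with the calls above (assumptions, not the original code) might be:

#include <QVector>
#include <opencv2/opencv.hpp>

class face {
public:
    void set_coord_face(const cv::Point& p)              { face_center = p; }
    void set_coord_eyes(const QVector<cv::Point>& eyes)  { eye_centers = eyes; }
    void set_radius_eyes(const QVector<int>& radii)      { eye_radii = radii; }
private:
    cv::Point face_center;
    QVector<cv::Point> eye_centers;
    QVector<int> eye_radii;
};

struct Detectors {
    cv::CascadeClassifier cascade;        // face detector
    cv::CascadeClassifier nestedCascade;  // eye detector
} obj;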