int Test::testCascadeClassifier()
{
    CvCapture* capture;
    Mat frame;
    Mat frame_original;

    //-- 1. Load the cascades
    if( !face_cascade.load( path + face_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; }
    if( !eyes_cascade.load( path + eyes_cascade_name ) ){ printf("--(!)Error loading eyes cascade\n"); return -1; }
    if( !mouth_cascade.load( path + mouth_cascade_name ) ){ printf("--(!)Error loading mouth cascade\n"); return -1; }

    //-- 2. Read the video stream (deprecated C capture API; -1 picks the default camera)
    capture = cvCaptureFromCAM( -1 );
    if( capture )
    {
        while( true )
        {
            frame_original = cvQueryFrame( capture );
            frame = frame_original.clone();

            //-- 3. Apply the classifier to the frame
            if( !frame.empty() )
            {
                detectAndDisplay( frame );
            }
            else
            {
                printf(" --(!) No captured frame -- Break!");
                break;
            }

            int c = waitKey(10);
            if( (char)c == 'c' ) { break; }
        }
        cvReleaseCapture( &capture );   // release the camera when the loop ends
    }
    return 0;
}
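////////////////////////////////////////////////////////////////////
// Hedged sketch (not part of the original test): the loop above uses
// the deprecated C capture API (CvCapture / cvCaptureFromCAM /
// cvQueryFrame). An equivalent loop with the C++ cv::VideoCapture
// interface could look like the following; detectAndDisplay is
// assumed to be the same helper the test already calls.
////////////////////////////////////////////////////////////////////
int testCascadeClassifierCpp()
{
    cv::VideoCapture capture(0);            // open the default camera
    if( !capture.isOpened() )
        return -1;

    cv::Mat frame;
    while( true )
    {
        capture >> frame;                   // grab the next frame
        if( frame.empty() )
            break;

        detectAndDisplay( frame.clone() );  // work on a copy, as the original does

        if( (char)cv::waitKey(10) == 'c' )
            break;
    }
    return 0;                               // the capture is released by its destructor
}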
int main(int argc, char* argv[])
{
    const string face_cascade_name = "haarcascade_frontalface_alt.xml";
    CascadeClassifier face_cascade;
    if (!face_cascade.load(face_cascade_name)) { printf("Error loading haarcascade face\n"); return -1; }

    string inputImgName;
    Mat frame;
    if (argc == 1)
        inputImgName = "group1.jpg";          // default test image
    else if (argc == 2)
        inputImgName = argv[1];
    else
    {
        printf("please type: facedetector imagename\n\n");
        return -1;                            // bail out instead of reading an empty file name
    }

    // Read an image
    frame = imread(inputImgName);
    if (frame.empty()) { printf("Could not read image %s\n", inputImgName.c_str()); return -1; }

    detectAndDisplay(frame, face_cascade);

    namedWindow("Display", WINDOW_AUTOSIZE);
    imshow("Display", frame);
    waitKey();
    return 0;
}
int main(int argc, char** argv)
{
    if (argc != 2)
    {
        cout << " Usage: display_image ImageToLoadAndDisplay" << endl;
        return -1;
    }

    Mat image = imread(argv[1], IMREAD_COLOR);  // Read the file
    if (image.empty())                          // Check for invalid input
    {
        cout << "Could not open or find the image" << std::endl;
        return -1;
    }

    //-- 1. Load the cascades
    if (!face_cascade.load(face_cascade_name)) { printf("--(!)Error loading face cascade\n"); return -1; }
    if (!eyes_cascade.load(eyes_cascade_name)) { printf("--(!)Error loading eyes cascade\n"); return -1; }

    //-- 2. Check this image
    detectAndDisplay(image);
    waitKey(0);
    return 0;
}
//////////////////////////////////////////////
// Main program
//////////////////////////////////////////////
void handleFrame2()
{
    // Apply the classifier to the frame
    if( !RaspiCvCam::ImageMat.empty() )
    {
        int result = detectAndDisplay();
        if (result > 0)
        {
            if (CropFace(RaspiCvCam::ImageMat, Myeye_left, Myeye_right, Myoffset_pct, Mydest_sz) == 1)
            {
                // Mat(RaspiCvCam::ImageMat) only copies the header, so croppedMat
                // shares pixel data with the camera image.
                croppedMat = Mat(RaspiCvCam::ImageMat);
                equalizeHist(croppedMat, croppedMat);

                // Wrap the single-channel Mat in a QImage (no pixel copy) and attach
                // a grayscale palette so it can be displayed.
                croppedImage = QImage((uchar*)croppedMat.data, croppedMat.cols, croppedMat.rows,
                                      croppedMat.step, QImage::Format_Indexed8);
                croppedImage.setColorTable(RaspiCvCam::GrayscaleColorTable);

                foundCroppedImage = true;
                searching = false;
            }
            else
            {
                qDebug("- crop face failed");
            }
        }
        UsersWindow::Instance->updateCamImage();
    }
}
////////////////////////////////////////////////////////////////////
// Panel::CascadeClassify()
// Description: This function has two inputs: a path to an image
// and a path to a classifier. We are not using Haar training
// in our current application, but we will leave this function
// in case someone decides to use it later. If someone does decide
// to use it, reference this website:
// http://www.memememememememe.me/training-haar-cascades/
// We already have the directory structure and scripts set up
// in the repository, so it will not be hard; there is just
// quite a bit of overhead with Haar training compared to
// our current method.
////////////////////////////////////////////////////////////////////
void Panel::CascadeClassify(string sImgPath, string sClassPath)
{
    if (!ShowImage(sImgPath, "Original"))
        return;

    detectAndDisplay(m_pPanel->m_Image, sClassPath);
}
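////////////////////////////////////////////////////////////////////
// Hedged sketch (not part of the original Panel code): what applying
// a custom cascade produced by the Haar-training scripts mentioned
// above could look like with the standard cv::CascadeClassifier API.
// The function name and the "Detections" window title are
// placeholders, not names from the repository.
////////////////////////////////////////////////////////////////////
#include <opencv2/opencv.hpp>

void classifyWithTrainedCascade(const cv::Mat& img, const std::string& sClassPath)
{
    cv::CascadeClassifier cascade;
    if (!cascade.load(sClassPath))          // e.g. a cascade.xml written by opencv_traincascade
        return;

    // Detection runs on an equalized grayscale copy of the input
    cv::Mat gray;
    cv::cvtColor(img, gray, cv::COLOR_BGR2GRAY);
    cv::equalizeHist(gray, gray);

    std::vector<cv::Rect> objects;
    cascade.detectMultiScale(gray, objects, 1.1, 3, 0, cv::Size(30, 30));

    // Draw the detections and show them
    cv::Mat out = img.clone();
    for (size_t i = 0; i < objects.size(); ++i)
        cv::rectangle(out, objects[i], cv::Scalar(0, 255, 0), 2);
    cv::imshow("Detections", out);
}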
/**
 * @function main
 */
int main( int argc, const char** argv )
{
    CvCapture* capture;
    cv::Mat frame;

    // Load the cascades
    if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n"); return -1; }

    cv::namedWindow(main_window_name, CV_WINDOW_NORMAL);
    cv::moveWindow(main_window_name, 400, 100);
    cv::namedWindow(face_window_name, CV_WINDOW_NORMAL);
    cv::moveWindow(face_window_name, 10, 100);
    cv::namedWindow("Right Eye", CV_WINDOW_NORMAL);
    cv::moveWindow("Right Eye", 10, 600);
    cv::namedWindow("Left Eye", CV_WINDOW_NORMAL);
    cv::moveWindow("Left Eye", 10, 800);
    cv::namedWindow("aa", CV_WINDOW_NORMAL);
    cv::moveWindow("aa", 10, 800);
    cv::namedWindow("aaa", CV_WINDOW_NORMAL);
    cv::moveWindow("aaa", 10, 800);

    createCornerKernels();
    // Skin-colour ellipse in the CrCb plane (cv::Point/cv::Size take ints,
    // so the fractional parts of these constants are truncated).
    ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2), 43.0, 0.0, 360.0,
            cv::Scalar(255, 255, 255), -1);

    // Read the video stream (deprecated C capture API)
    capture = cvCaptureFromCAM( -1 );
    if( capture )
    {
        while( true )
        {
            frame = cvQueryFrame( capture );

            // Check the frame before touching it
            if( frame.empty() )
            {
                printf(" --(!) No captured frame -- Break!");
                break;
            }

            // mirror it
            cv::flip(frame, frame, 1);
            frame.copyTo(debugImage);

            // Apply the classifier to the frame
            detectAndDisplay( frame );

            imshow(main_window_name, debugImage);

            int c = cv::waitKey(10);
            if( (char)c == 'c' ) { break; }
            if( (char)c == 'f' ) { imwrite("frame.png", frame); }
        }
    }

    releaseCornerKernels();
    return 0;
}
/**
 * @function main
 */
int main( int argc, const char** argv )
{
    cv::Mat frame;

    if( argc < 2 )
    {
        printf("usage: %s <image>\n", argv[0]);
        return -1;
    }

    // Load the cascades
    if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n"); return -1; }

    cv::namedWindow(main_window_name, CV_WINDOW_NORMAL);
    cv::moveWindow(main_window_name, 400, 100);
    cv::namedWindow(face_window_name, CV_WINDOW_NORMAL);
    cv::moveWindow(face_window_name, 10, 100);
    cv::namedWindow("Right Eye", CV_WINDOW_NORMAL);
    cv::moveWindow("Right Eye", 10, 600);
    cv::namedWindow("Left Eye", CV_WINDOW_NORMAL);
    cv::moveWindow("Left Eye", 10, 800);
    cv::namedWindow("aa", CV_WINDOW_NORMAL);
    cv::moveWindow("aa", 10, 800);
    cv::namedWindow("aaa", CV_WINDOW_NORMAL);
    cv::moveWindow("aaa", 10, 800);

    createCornerKernels();
    // Skin-colour ellipse in the CrCb plane (the fractional constants are truncated to ints)
    ellipse(skinCrCbHist, cv::Point(113, 155.6), cv::Size(23.4, 15.2), 43.0, 0.0, 360.0,
            cv::Scalar(255, 255, 255), -1);

    frame = cv::imread(argv[1]);
    if( frame.empty() )
    {
        printf(" cannot read image. terminating\n");
        return -1;
    }
    frame.copyTo(debugImage);

    // Apply the classifier to the frame
    cv::Mat result = detectAndDisplay( frame );

    imshow(main_window_name, debugImage);

    std::stringstream result_filename;
    result_filename << argv[1] << "_eyes.jpg";
    imwrite(result_filename.str(), result);
    std::cout << "written file: " << result_filename.str() << std::endl;

    releaseCornerKernels();
    return 0;
}
void FaceDetector::head_camera_processing(const sensor_msgs::Image::ConstPtr& msg)
{
    cv_bridge::CvImagePtr cv_ptr_cam;
    try
    {
        cv_ptr_cam = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;
    }

    detectAndDisplay(cv_ptr_cam->image);
}
bool getFaceCoord(int* x, int* y, int* z)
{
    if ( !isInitialized() )
    {
        std::cerr << "Pointers not initialized" << std::endl;
        return false;
    }

    cv::Point pt;
    int a = 0;
    if ( detectAndDisplay( pt, a ) )
    {
        *x = pt.x;
        *y = pt.y;
        *z = a;
        return true;
    }
    return false;
}
void Snapshot::callback(const sensor_msgs::ImageConstPtr& msg)
{
    cv_bridge::CvImagePtr cv_ptr;
    cv::Mat cvImage;
    try
    {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    }
    catch(cv_bridge::Exception& e)
    {
        ROS_ERROR("cv_bridge exception: %s", e.what());
        return;  // cv_ptr would be null below
    }

    cvImage = cv_ptr->image;
    detectAndDisplay(cvImage);
    pub->publish(cv_ptr->toImageMsg());
}
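////////////////////////////////////////////////////////////////////
// Hedged sketch (not from the original nodes): how a callback such as
// Snapshot::callback above is typically wired to a camera topic with
// image_transport. The node name, the topic name, and the assumption
// that Snapshot is default-constructible and owns its publisher are
// placeholders for whatever the real package does.
////////////////////////////////////////////////////////////////////
#include <ros/ros.h>
#include <image_transport/image_transport.h>

int main(int argc, char** argv)
{
    ros::init(argc, argv, "face_snapshot_node");
    ros::NodeHandle nh;
    image_transport::ImageTransport it(nh);

    Snapshot snapshot;  // assumed to set up the publisher used in callback()

    // Every incoming frame on the camera topic is handed to the callback.
    image_transport::Subscriber sub =
        it.subscribe("camera/image_raw", 1, &Snapshot::callback, &snapshot);

    ros::spin();
    return 0;
}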
/** @function reco */
int reco( Mat frame )
{
    //-- 1. Load the cascades
    if( !face_cascade.load( face_cascade_name ) ){ printf("--(!)Error loading face cascade\n"); return -1; }

    //-- 2. Process the given frame (no video stream is opened here)
    while( true )
    {
        //-- 3. Apply the classifier to the frame
        if( !frame.empty() )
        {
            detectAndDisplay( frame );
        }
        else
        {
            printf(" --(!) No captured frame -- Break!");
            break;
        }

        int c = waitKey(10);
        if( (char)c == 'c' ) { break; }
    }
    return 0;
}
int main()
{
    capture = cvCaptureFromCAM(0);
    if(capture)
    {
        IplImage *frame1, *frame2;
        cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH, column);
        cvSetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT, row);

        for(;;)
        {
            frame1 = cvQueryFrame(capture);
            if(!frame1)
            {
                printf("No captured frame\n");
                break;
            }
            else
            {
                // Reset the projection buffers before processing this frame
                memset(SumX, 0, sizeof(short) * column);
                memset(SumY, 0, sizeof(short) * row);
                YUpperBoundPos = YUpperBoundvalue = YLowerBoundPos = YLowerBoundvalue = 0;

                // Resize the captured frame to column x row before detection
                frame2 = cvCreateImage(cvSize(column, row), frame1->depth, frame1->nChannels);
                cvResize(frame1, frame2, CV_INTER_LINEAR);
                detectAndDisplay(frame2);
                cvReleaseImage(&frame2);   // avoid leaking one image per frame
                sleep(0);
            }

            char c = cvWaitKey(100);
            if(c == 'c' || c == 'C') { break; }
        }
        cvReleaseCapture(&capture);
    }
    return 0;
}
int main(void)
{
    CGImageRef image;
    cv::Mat frame;

    // 1. Load the cascades
    if (!face_cascade.load(face_cascade_name)) { printf("Error loading face cascade\n"); return -1; }
    if (!eyes_cascade.load(eyes_cascade_name)) { printf("Error loading eyes cascade\n"); return -1; }

    // 2. Read the desktop as a video stream
    while (true)
    {
        image = getDesktopImage();
        frame = CGImageToMat(image);
        // NOTE: if getDesktopImage() returns an owned CGImageRef, it should be
        // released here (CGImageRelease) to avoid leaking one image per frame.
        if (frame.empty())
        {
            printf("No captured frame -- breaking\n");
            break;
        }

        // 3. Apply the classifier to the frame
        detectAndDisplay(frame);

        int c = cv::waitKey(10);
        if ((char)c == 27) { break; }  // escape
    }
    return 0;
}
/*
 * @function main
 */
int main( int argc, const char** argv )
{
    // Get the mode
    if (argc > 1)
    {
        const char *inputMode = argv[1];
        if (strcmp(inputMode, "normal") == 0)     { mode = NORMAL; }
        else if (strcmp(inputMode, "debug") == 0) { mode = DEBUG; }
        else if (strcmp(inputMode, "plot") == 0)  { mode = PLOT; }
        else                                      { mode = NORMAL; }
    }
    else
    {
        mode = NORMAL;
    }

    if (mode == NORMAL)
    {
        eventHandler = EventHandler();
    }

    if (mode == DEBUG || mode == NORMAL)
    {
        printf("Input Mode: %s\n",
               mode == NORMAL ? "normal" :
               mode == DEBUG  ? "debug"  :
               mode == PLOT   ? "plot"   : "none");

        cv::namedWindow(main_window_name, CV_WINDOW_NORMAL);
        cv::moveWindow(main_window_name, 400, 100);
        cv::namedWindow(face_window_name, CV_WINDOW_NORMAL);
        cv::moveWindow(face_window_name, 10, 100);
        cv::namedWindow("Right Eye", CV_WINDOW_NORMAL);
        cv::moveWindow("Right Eye", 10, 600);
        cv::namedWindow("Left Eye", CV_WINDOW_NORMAL);
        cv::moveWindow("Left Eye", 10, 800);
    }
    else if (mode == PLOT)
    {
        cv::namedWindow(face_window_name, CV_WINDOW_NORMAL);
        cv::moveWindow(face_window_name, 400, 100);
    }

    cv::Mat frame;

    // Load the cascades
    if( !face_cascade.load( FACE_CASCADE_FILE ) ){ printf("--(!)Error loading face cascade, please change face_cascade_name in source code.\n"); return -1; }

    // Read the video stream
    cv::VideoCapture capture( 0 );
    if( capture.isOpened() )
    {
        capture.set(CV_CAP_PROP_FRAME_WIDTH, 640);
        capture.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
        capture.set(CV_CAP_PROP_FPS, 15);
        capture >> frame;

        while( true )
        {
            capture >> frame;

            // Check the frame before touching it
            if( frame.empty() )
            {
                printf(" --(!) No captured frame -- Break!");
                break;
            }

            // mirror it
            cv::flip(frame, frame, 1);
            frame.copyTo(debugImage);

            // Apply the classifier to the frame
            detectAndDisplay( frame );

            if (mode == DEBUG || mode == NORMAL)
            {
                imshow(main_window_name, debugImage);
            }

            if (mode == DEBUG || mode == PLOT || mode == NORMAL)
            {
                int c = cv::waitKey(10);
                if( (char)c == 'c' ) { break; }
                if( (char)c == 'f' ) { imwrite("frame.png", frame); }
            }
        }
    }

    return 0;
}
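////////////////////////////////////////////////////////////////////
// Hedged sketch (not from any of the projects above): every snippet
// calls a project-specific detectAndDisplay() whose body is not shown
// here. A minimal version, limited to drawing face rectangles, could
// look like this; the globals below are placeholders for what the
// real projects declare elsewhere, and the real implementations add
// eye/mouth detection, cropping, eye tracking, etc.
////////////////////////////////////////////////////////////////////
#include <opencv2/opencv.hpp>

static cv::CascadeClassifier face_cascade;                          // assumed to be loaded in main()
static const std::string window_name = "Capture - Face detection";  // placeholder window title

void detectAndDisplay( cv::Mat frame )
{
    std::vector<cv::Rect> faces;
    cv::Mat frame_gray;

    // Detection runs on an equalized grayscale copy of the frame
    cv::cvtColor( frame, frame_gray, cv::COLOR_BGR2GRAY );
    cv::equalizeHist( frame_gray, frame_gray );

    // Detect faces at multiple scales
    face_cascade.detectMultiScale( frame_gray, faces, 1.1, 3, 0, cv::Size(30, 30) );

    // Draw a rectangle around each detection and show the result
    for( size_t i = 0; i < faces.size(); i++ )
        cv::rectangle( frame, faces[i], cv::Scalar(255, 0, 255), 2 );

    cv::imshow( window_name, frame );
}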