int main(int argc, char ** argv)
{
    /** Example takes a directory with images .jpg **/
    if (argc > 1) {
        vector<string> images = Support::pathVector(argv[1], ".jpg");
        cout << argv[1] << endl;

        // Init face detector
        FaceDetector *fd = new FaceDetector();

        // for each image in directory
        for (uint i = 0; i < images.size(); ++i) {
            // read image
            Mat img = imread(images[i]);
            cv::Mat rot;
            fd->detectFromImage(img, rot);
            imshow(Support::getFilePath(images[i]), fd->getCroppedImg());
            cvSupport::indexBrowser(i, images.size());
            // fd->saveCroppedFace(Support::getFilePath(images[i]) + Support::getFileName(images[i]) + "_face.jpg");
        }
        delete fd;
    } // if argc > 1
    return 0;
}
void processImage(string input, FaceDetector detect)
{
    int count;
    cv::Mat image;
    cv::Mat skinMasked;

    image = cv::imread(input, CV_LOAD_IMAGE_COLOR); // Read the file
    // Note: the 'detect' parameter is not used below; a fresh local detector is created instead.
    FaceDetector face;

    if (!image.data) // Check for invalid input
    {
        cout << "Could not open or find the image" << std::endl;
    }
    else
    {
        /* Display original image */
        cout << "Here is the original image" << endl;
        namedWindow("Original Image", WINDOW_NORMAL); // Create a window for display.
        imshow("Original Image", image);              // Show our image inside it.
        waitKey(0);                                   // Wait for a keystroke in the window

        /* Display thresholded (skin-masked) image */
        skinMasked = face.skinMasking(image);
        cout << "Here is the skin detected image" << endl;
        namedWindow("Masked Skin Image", WINDOW_NORMAL);
        imshow("Masked Skin Image", skinMasked);
        waitKey(0); // Wait for a keystroke in the window

        /* Count skin tone pixels */
        count = face.skinTonePixels(image);
        cout << "Here is the count " << count << endl;
    }
}
int main(int argc, char *argv[])
{
    google::InitGoogleLogging(argv[0]);
    FLAGS_alsologtostderr = false;

    FaceDetector fd;
    Landmarker lder;
    fd.LoadXML("../haarcascade_frontalface_alt.xml");
    lder.LoadModel("../deeplandmark");

    Mat image;
    Mat gray;
    image = imread("../test.jpg");
    if (image.data == NULL)
        return -1;
    cvtColor(image, gray, CV_BGR2GRAY);

    vector<Rect> bboxes;
    fd.DetectFace(gray, bboxes);

    vector<Point2f> landmarks;
    for (int i = 0; i < bboxes.size(); i++) {
        BBox bbox_ = BBox(bboxes[i]).subBBox(0.1, 0.9, 0.2, 1);
        landmarks = lder.DetectLandmark(gray, bbox_);
        showLandmarks(image, bbox_.rect, landmarks);
    }
    return 0;
}
int main(int argc, const char * argv[])
{
    VideoCaptureManager capture;
    AlgorithmHolder detector;
    FaceDetector face;

    cv::namedWindow("Capture", CV_WINDOW_OPENGL | CV_WINDOW_AUTOSIZE | CV_WINDOW_FREERATIO);

    //! assigns callback for mouse events
    cv::setMouseCallback("Capture", mouseCallback);

    cv::Mat_<unsigned char> frame;
    while (1)
    {
        capture.read(frame);
        if (!frame.empty()) {
            // detector.detect(frame);
            face.detect(frame);
            if (cv::waitKey(30) > 27) {
                std::cerr << detector.select() << std::endl;
            }
            cv::imshow("Capture", frame);
        }
    }
}
void video_thread_CL(void* pParams)
{
    FaceDetector *faceDetector;
    if (threadUseCL) {
        faceDetector = (FaceDetectorCL*)pParams;
    } else {
        faceDetector = (FaceDetectorCpu*)pParams;
    }
    std::string name = faceDetector->name();

    //HAAR_EYE_TREE_EYEGLASSES_DATA
    //HAAR_EYE_DATA
    //HAAR_FRONT_FACE_DEFAULT_DATA
    //LBP_FRONTAL_FACE
    //LBP_PROFILE_FACE
    faceDetector->load(HAAR_FRONT_FACE_DEFAULT_DATA);

    VideoCapture videoCapture;
    cv::Mat frame, frameCopy, image;
    videoCapture.open(faceDetector->videoFile().c_str());
    if (!videoCapture.isOpened()) {
        cout << "No video detected" << endl;
        return;
    }

    if (imshowFlag) {
        cv::namedWindow(name.c_str(), 1);
    }

    if (videoCapture.isOpened()) {
        cout << "In capture ..." << name.c_str() << endl;
        while (videoCapture.grab()) {
            if (!videoCapture.retrieve(frame, 0)) {
                break;
            }
            faceDetector->setSrcImg(frame, 1);
            faceDetector->doWork();
            if (imshowFlag) {
                cv::imshow(name.c_str(), faceDetector->resultMat());
            }

            std::vector<cv::Rect> &faces_result = faceDetector->getResultFaces();
            std::cout << "face --" << name.c_str() << std::endl;
            for (int i = 0; i < faces_result.size(); ++i) {
                std::cout << faces_result.at(i).x << ", " << faces_result.at(i).y << std::endl;
            }

            if (waitKey(10) >= 0) {
                videoCapture.release();
                break;
            }
            Sleep(1);
        }
    }

    if (imshowFlag) {
        cvDestroyWindow(name.c_str());
    }
    finishTaskFlag++;
    _endthread();
    return;
}
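The _endthread() call at the end of video_thread_CL suggests the worker is launched with the MSVC CRT thread API. A minimal sketch of such a launch point, assuming FaceDetectorCpu is the concrete type passed through pParams (the actual start-up code is not part of this snippet, so the helper name and configuration below are illustrative only):

#include <process.h>   // _beginthread / _endthread (MSVC CRT)

// Hypothetical launch site for the worker above; FaceDetectorCpu and the way the
// detector is prepared are assumptions based only on the casts in the snippet.
void startVideoThread(FaceDetectorCpu* cpuDetector)
{
    // pParams is cast back to FaceDetectorCpu* (or FaceDetectorCL*) inside video_thread_CL
    _beginthread(video_thread_CL, 0, cpuDetector);   // stack size 0 = CRT default
}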
/* Change threshold helper */
void thresholdChange(string input, FaceDetector detect)
{
    size_t pos;
    int crMin, crMax, cbMin, cbMax;

    cout << "Please input crMin" << endl;
    getline(cin, input);
    crMin = stoul(input, &pos, 10);

    cout << "Please input crMax" << endl;
    getline(cin, input);
    crMax = stoul(input, &pos, 10);

    cout << "Please input cbMin" << endl;
    getline(cin, input);
    cbMin = stoul(input, &pos, 10);

    cout << "Please input cbMax" << endl;
    getline(cin, input);
    cbMax = stoul(input, &pos, 10);

    /* OBSERVATIONS: yMin and yMax do not seem to make much of a difference */
    // Note: 'detect' is passed by value; unless FaceDetector shares state internally,
    // the new thresholds only affect this local copy.
    detect.thresholdChange(0, 255, crMin, crMax, cbMin, cbMax);
}
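The Cr/Cb ranges gathered above feed FaceDetector::thresholdChange, whose implementation is not shown here. For reference only, a skin mask built from such ranges typically looks like the sketch below; the function name, the default ranges, and the use of cv::inRange are assumptions, not the project's code:

#include <opencv2/opencv.hpp>

// Hypothetical sketch of YCrCb skin masking driven by the same four thresholds;
// the real FaceDetector::skinMasking / thresholdChange may differ.
cv::Mat skinMaskSketch(const cv::Mat& bgr,
                       int crMin = 133, int crMax = 173,
                       int cbMin = 77,  int cbMax = 127)
{
    cv::Mat ycrcb, mask;
    cv::cvtColor(bgr, ycrcb, cv::COLOR_BGR2YCrCb);    // channel order: Y, Cr, Cb
    cv::inRange(ycrcb,
                cv::Scalar(0,   crMin, cbMin),        // Y left wide open, matching the
                cv::Scalar(255, crMax, cbMax),        // observation that yMin/yMax barely matter
                mask);                                // 255 where the pixel falls in the skin range
    return mask;
}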
void ProcessImage::process(int index)
{
    if (index > -1 && index < _imagesList.size()) {
        // DETECT FACES
        FaceDetector *detector = new FaceDetector(index, QImage2Mat(_imagesList.at(index)));
        // Connect before starting the worker so the finished signal cannot be missed.
        connect(detector, SIGNAL(detectionFinished(int, QList<struct DetectorData>)),
                SLOT(insertNewData(int, QList<struct DetectorData>)));
        detector->start();
        // _detectorData = detector->getData();
    } else {
        qWarning() << "index out of range (0 <= index < _imagesList.size()) in ProcessImage::process:" << index;
    }
}
/**
 * @brief Main
 *
 * @param[in] argc
 * @param[in] argv
 *
 * @returns 0 on success, 1 if no face was found, negative on error
 */
int main(int argc, const char* argv[])
{
    if (argc < 4) {
        banner();
        help();
        cerr << "ERR: missing parameters" << endl;
        return -3;
    }

    string config_f(argv[1]);
    string* config_e = NULL;
    int i = 2;
    if (argc == 5) {
        config_e = new string(argv[i++]);
    }
    string infile(argv[i++]);
    string outfile(argv[i++]);

    FaceDetector* detector;
    if (config_e != NULL) {
        detector = new FaceDetector(config_f, *config_e);
    } else {
        detector = new FaceDetector(config_f);
    }
    delete config_e;

    try {
        Mat img = matrix_io_load(infile);
        Mat cropped;
        if (detector->detect(img, cropped)) {
            matrix_io_save(cropped, outfile);
        } else {
            //cerr << "ERR: no face found.." << endl;
            delete detector; // avoid leaking the detector on the early return
            return 1;
        }
        delete detector;
    } catch (int e) {
        cerr << "ERR: Exception #" << e << endl;
        return -e;
    }
    return 0;
}
int Loader::ValidateFace(IplImage* pImg)
{
    FaceDetectResult e = gDetector.Process(pImg);
    //int i = ShowImage(pImg, "Faces");
    if (e != ONE_FACE)
        return 0;
    //if (i == 'y' || i == 'Y')
    //    return 1;
    return 1;
}
void PreProcess(IplImage* src, IplImage** dest)
{
    if (*dest)
        cvReleaseImage(dest);

    try {
        FaceDetector* fd = new FaceDetector(src, false);
        fd->Detect(true);

        if (!fd->GetFaceVec().empty()) {
            // get the face from the face detector
            IplImage* face = fd->GetFaceVec()[0];
            int width = 100;
            int height = 100;
            *dest = cvCreateImage(cvSize(width, height), src->depth, 1);
            if (!*dest)
                throw std::string("PreProcess could not create dest image");
            if (src->nChannels != 1)
                ConvertToGreyScale(face, face);
            Resize(face, *dest);
            // do histogram equalization on the found face
            cvEqualizeHist(*dest, *dest);
        } else {
            throw std::string("FaceDetector could not find face");
        }
        delete fd; // release the detector once the face has been copied out
    } catch (...) {
        throw;
    }
}
int main()
{
    clock_t start, finish;
    FaceDetector faceDetector;
    faceDetector.LoadTrainingParams("npd_model_1.mat");

    string imgPath = "1.jpg";
    Mat img = imread(imgPath);
    imshow("org", img);

    // Wrap the cv::Mat header in a named IplImage for the legacy C interface
    // (the original took the address of a temporary, which is not valid C++).
    IplImage ipl = IplImage(img);
    IplImage* img_1 = &ipl;

    start = clock();
    faceDetector.Detect(img_1);
    finish = clock();
    double totaltime = (double)(finish - start) / CLOCKS_PER_SEC;
    cout << "Time elapsed:" << totaltime << endl;

    Mat rst = Mat(img_1);
    imshow("rst", rst);
    waitKey(0);
    return 0;
}
// add training images for a person
void addTrainingImagesCb(const sensor_msgs::ImageConstPtr& msg)
{
    // cout << "addTrainingImagesCb" << endl;

    if (_as.isPreemptRequested() || !ros::ok())
    {
        // std::cout << "preempt req or not ok" << std::endl;
        ROS_INFO("%s: Preempted", _action_name.c_str());
        // set the action state to preempted
        _as.setPreempted();
        // success = false;
        // break;
        // cout << "shutting down _image_sub" << endl;
        _image_sub.shutdown();
        return;
    }

    if (!_as.isActive())
    {
        // std::cout << "not active" << std::endl;
        // cout << "shutting down _image_sub" << endl;
        _image_sub.shutdown();
        return;
    }

    cv_bridge::CvImagePtr cv_ptr;
    try
    {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("%s: cv_bridge exception: %s", _action_name.c_str(), e.what());
        return;
    }

    if (_window_rows == 0)
    {
        _window_rows = cv_ptr->image.rows;
        _window_cols = cv_ptr->image.cols;
    }

    std::vector<cv::Rect> faces;
    // _fd.detectFaces(cv_ptr->image, faces, true);
    std::vector<cv::Mat> faceImgs = _fd.getFaceImgs(cv_ptr->image, faces, true);

    if (faceImgs.size() > 0)
        _fc->capture(faceImgs[0]); // call images capturing function

    _result.names.push_back(_goal_argument);
    // _result.confidence.push_back(2.0);

    int no_images_to_click;
    _ph.getParam("no_training_images", no_images_to_click);

    if (_fc->getNoImgsClicked() >= no_images_to_click)
    {
        // Mat inactive = cv::Mat::zeros(cv_ptr->image.rows, cv_ptr->image.cols, CV_32F);
        Mat inactive(_window_rows, _window_cols, CV_8UC3, CV_RGB(20, 20, 20));
        appendStatusBar(inactive, "INACTIVE", "Images added. Please train.");
        cv::imshow(_windowName, inactive);
        // cv::displayOverlay(_windowName, "Images added", 3000);
        _as.setSucceeded(_result);
    }
    else
    {
        // update GUI window
        // check GUI parameter
        appendStatusBar(cv_ptr->image, "ADDING IMAGES.", "Images added");
        cv::imshow(_windowName, cv_ptr->image);
    }
    cv::waitKey(3);
}
JNIEXPORT void JNICALL Java_com_lge_ccdevs_tracker_CameraPreview_native_1cv_1facex
    (JNIEnv *env, jobject thiz, jobject srcimg)
{
    AndroidBitmapInfo bInfo;
    // uint32_t* bPixs;
    char *bPixs;
    int bRet;

    LOGE("**IN JNI bitmap converter IN!");

    // 1. retrieve information about the bitmap
    if ((bRet = AndroidBitmap_getInfo(env, srcimg, &bInfo)) < 0) {
        LOGE("AndroidBitmap_getInfo failed! error = %d", bRet);
        return;
    }
    if (bInfo.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
        LOGE("Bitmap format is not RGBA_8888!");
        return;
    }

    // 2. lock the pixel buffer and retrieve a pointer to it
    if ((bRet = AndroidBitmap_lockPixels(env, srcimg, (void**)&bPixs)) < 0) {
        LOGE("AndroidBitmap_lockPixels() failed! error = %d", bRet);
        return;
    }

    // 3. convert bitmap to IplImage
    //    (the pixels stay locked until the result is copied back in step 5;
    //     the original unlocked here and again at the end, which would write
    //     to an unlocked buffer and unlock twice)
    LOGE("#### Start JNI bitmap processing");
    IplImage* img = cvCreateImage(cvSize(bInfo.width, bInfo.height), IPL_DEPTH_8U, 4);
    memcpy(img->imageData, bPixs, img->imageSize);

    // 4. apply processing
    IplImage* dst = cvCreateImage(cvSize(bInfo.width, bInfo.height), IPL_DEPTH_8U, 4);
    cvCopy(img, dst);

    FaceDetector *detector = new FaceDetector();
    detector->detect_faces(dst);
    // detector->display_faces(dst);

    if (detector->mfaces) {
        FaceAligner *aligner = new FaceAligner();
        CvRect rt_ref = cvRect(0, 0, bInfo.width, bInfo.height);
        aligner->align_faces(dst, *(CvRect*)cvGetSeqElem(detector->mfaces, 0), rt_ref);
        delete aligner;
    }
    delete detector;

    /*
    SkBitmap* bitmap = new SkBitmap;
    bitmap->setConfig(SkBitmap::kARGB_8888_Config, bInfo.width, bInfo.height);
    if( !bitmap->allocPixels() ) {
        android_printLog(ANDROID_LOG_DEBUG, "CheckPoint", "Fail: allocPixels");
        delete bitmap;
        return NULL;
    }
    char *p = (char *)bitmap->getPixels();
    memcpy( p, dst->imageData, dst->imageSize );
    */

    // 5. copy the processed image back into the (still locked) bitmap, then unlock once
    memcpy(bPixs, dst->imageData, dst->imageSize);
    AndroidBitmap_unlockPixels(env, srcimg);

    cvReleaseImage(&img);
    cvReleaseImage(&dst);
    LOGE("#### End processing");
    // return env->NewObject(fields.bitmapClazz, fields.bitmapConstructor, (int)bitmap, p, true, NULL, -1);
    return;
}
int main(int argc, char ** argv)
{
    /**********************************************************************
     * Setup                                                              *
     **********************************************************************
     * This block sets up everything needed for the program               *
     **********************************************************************/

    //-- YARP Resource finder (used to extract command line arguments)
    //---------------------------------------------------------------------
    yarp::os::ResourceFinder resourceFinder;
    resourceFinder.setVerbose(false);
    resourceFinder.configure(argc, argv);

    //-- Show usage if requested:
    //----------------------------------------------------------------------
    if (resourceFinder.check("help"))
    {
        printUsage();
        return 1;
    }

    //-- Extract can ids from the command line
    //----------------------------------------------------------------------
    std::vector<int> can_ids;
    if (!resourceFinder.check("cans"))
    {
        std::cerr << "ERROR: no can sequence was specified!" << std::endl;
        printUsage();
        return 2;
    }
    std::string can_list = resourceFinder.find("cans").asString();
    std::stringstream can_list_ss(can_list);
    while (!can_list_ss.eof())
    {
        int temp;
        can_list_ss >> temp;
        can_ids.push_back(temp);
        std::cout << "New target added: " << temp << std::endl;
    }

    //-- Setup YARP
    //-----------------------------------------------------------------------
    //-- Start yarp server
    // yarp::os::Network::init();
    // yarp::os::Network::runNameServer(argc, argv);

    //-- Setup Turret
    //-----------------------------------------------------------------------
    //-- Read port from command line
    std::string serial_port_name = resourceFinder.find("port").asString();
    if (serial_port_name == "")
        serial_port_name = "/dev/ttyACM0";
    std::cout << "Connecting to serial port: " << serial_port_name << std::endl;

    //-- Start turret
    Turret myTurret(serial_port_name);
    if (!myTurret.start())
    {
        std::cerr << "[Test] Not able to connect!" << std::endl;
        return 3;
    }
    std::cout << "Turret is now connected" << std::endl;

    //-- Create face detector
    //-----------------------------------------------------------------------
    FaceDetector faceDetector;
    std::cout << "Face detector ready. Safety measures enabled!" << std::endl;

    //-- Setup webcam
    //-----------------------------------------------------------------------
    int camera_id = -1;
    if (resourceFinder.check("camera"))
        camera_id = resourceFinder.find("camera").asInt();
    std::cout << "Opening camera: " << camera_id << std::endl;

    cv::VideoCapture capture(camera_id);
    if (!capture.isOpened())
    {
        report(ERROR, "Camera failed at opening");
        return 4;
    }

    //-- Setup P controller params
    //-----------------------------------------------------------------------
    float kpx = 0.02;
    float kpy = 0.02;

    /**********************************************************************
     * Program itself                                                     *
     **********************************************************************
     * This block is the program                                          *
     **********************************************************************/

    //-- For each target
    for (int i = 0; i < can_ids.size(); i++)
    {
        //-- Loop to track targets
        //---------------------------------------------------------------------------------------
        int error_x = 600, error_y = 600;
        do
        {
            //-- Get image from webcam
            cv::Mat frame;
            capture.read(frame);

            /*
             * ***** HERE IS WHERE TARGET IS EXTRACTED
             */
            std::vector<cv::Point> targets = getTargetHSV(0, frame);

            if (targets.size() > 0)
            {
                cv::Point target = targets[0];

                //-- Calculate error
                error_x = frame.cols / 2 - target.x;
                error_y = frame.rows / 2 - target.y;

                std::cout << "--------------DEBUG--------------------------------" << std::endl;
                std::cout << "Error x: " << error_x << std::endl;
                std::cout << "Error y: " << error_y << std::endl;

                //-- P controller
                int move_x = - error_x * kpx;
                int move_y = - error_y * kpy;

                //-- Command motors
                myTurret.movePanInc(move_x);
                myTurret.moveTiltInc(move_y);

                //-- Plotting target to have feedback
                //cv::rectangle(frame, faces[0], cv::Scalar(0, 0, 255));
                //cv::circle(frame, cv::Point(center_x, center_y ), 2, cv::Scalar(0, 0, 255), 2);
                cv::circle(frame, target, 3, cv::Scalar(255, 0, 0), 2);
            }
            else
            {
                myTurret.seek(); // Seek new targets
            }

            //-- This is the scope (substitute it by another thing if needed)
            cv::circle(frame, cv::Point(frame.cols / 2, frame.rows / 2), 2, cv::Scalar(255, 0, 0), 2);

            //-- Show on screen things
            cv::imshow("out", frame);
            char key = cv::waitKey(30);
            if (key == 27 || key == 'q')
                return 0;

        } while (abs(error_x) > THRESH_X || abs(error_y) > THRESH_Y);

        //-- Safety loop: (Extract faces)
        //-------------------------------------------------------------------------------------------
        std::vector<cv::Rect> faces;
        do
        {
            //-- Get image from webcam
            cv::Mat frame;
            capture.read(frame);

            faces = faceDetector.detect(frame);
            if (faces.size() == 0)
                break;

            std::cout << "Face detected!! Waiting to shoot..." << std::endl;

            //-- calculate center of bounding box
            int center_x = faces[0].x + faces[0].width / 2;
            int center_y = faces[0].y + faces[0].height / 2;

            //-- Plotting
            cv::rectangle(frame, faces[0], cv::Scalar(0, 0, 255));
            cv::circle(frame, cv::Point(center_x, center_y), 2, cv::Scalar(0, 0, 255), 2);
            cv::imshow("out", frame);
            char key = cv::waitKey(30);
            if (key == 27 || key == 'q')
                return 0;

        } while (faces.size() > 0);

        //-- All is clear: shoot!!
        //---------------------------------------------------------------------------------------------
        myTurret.shoot();
        std::cout << "Target \"" << can_ids.at(i) << "\" was shot down!" << std::endl;
        yarp::os::Time::delay(1);
    }

    myTurret.destroy();
    yarp::os::Network::fini();
    return 0;
}
int main(int argc, char* argv[])
{
    if (argc != 2) {
        printf("usage: %s dir_name\n", argv[0]);
        return 0;
    }
    std::string dir_path(argv[1]);
    std::vector<std::string> file_path_array;

    FaceDetector* detector = new FaceDetector;
    detector->init("data/front_15_cascade.xml");
    CFaceFeature* descriptor = new CFaceFeature;
    descriptor->LoadFaceAsm("data/facelandmark");
    FaceClustering* facecluster = new FaceClustering;

    Load(dir_path, file_path_array);

    std::vector<float*> vec_feature_buffer;
    for (size_t i = 0; i < file_path_array.size(); i++) {
        Mat pImage = cv::imread(file_path_array[i].c_str(), 1);
        int imWidth = pImage.cols;
        int imHeight = pImage.rows;
        Mat dImage;
        cv::cvtColor(pImage, dImage, CV_BGR2GRAY);

        vector<FRECT> faceSet;
        bool b;
        {
            PTIME(pt1, "detect face", true, 5);
            b = detector->detectFace(dImage.data, imWidth, imHeight, faceSet);
        }
        if (b) {
            int n = faceSet.size();
            std::string s = n > 1 ? "faces" : "face";
            std::cout << file_path_array[i] << " detected " << n << " " << s << endl;
        } else {
            std::cout << file_path_array[i] << " did not detect any face" << endl;
            continue;
        }

        vector<rect> faceRegion;
        for (size_t j = 0; j < faceSet.size(); j++) {
            rect faceRect;
            faceRect.x0 = faceSet[j].x;
            faceRect.y0 = faceSet[j].y;
            faceRect.x1 = faceSet[j].x + faceSet[j].w - 1;
            faceRect.y1 = faceSet[j].y + faceSet[j].h - 1;
            faceRegion.push_back(faceRect);
            std::cout << faceRect.x0 << " " << faceRect.y0 << " "
                      << faceRect.x1 << " " << faceRect.y1 << "\n";
        }

        if (faceSet.size() > 0) {
            std::vector<float*> featureBuffer;
            {
                PTIME(pt1, "extractFeature", true, 5);
                descriptor->extractFeature(dImage.data, imWidth, imHeight, faceRegion, featureBuffer);
            }
            cout << endl;
            vec_feature_buffer.insert(vec_feature_buffer.end(),
                                      featureBuffer.begin(), featureBuffer.end());
        }
    }

    vector<int> clusterNum(vec_feature_buffer.size(), -1);
    cout << "total " << clusterNum.size() << " features to cluster\n";
    {
        PTIME(pt1, "cluster", true, 5);
        facecluster->cluster(vec_feature_buffer, clusterNum);
    }
    for (int i = 0; i < clusterNum.size(); i++) {
        cout << clusterNum[i] << ",";
    }
    cout << endl;
    return 0;
}
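The Load(dir_path, file_path_array) helper used above is not shown in this file. A hedged sketch of a directory-listing helper with the same shape, purely as an assumption about what it might do (the real implementation, including any extension filtering, may differ):

#include <dirent.h>
#include <string>
#include <vector>

// Hypothetical stand-in for the Load() helper above; POSIX dirent-based listing.
static void Load(const std::string& dir_path, std::vector<std::string>& file_path_array)
{
    DIR* dir = opendir(dir_path.c_str());
    if (!dir) return;
    struct dirent* entry;
    while ((entry = readdir(dir)) != NULL) {
        std::string name(entry->d_name);
        if (name == "." || name == "..") continue;          // skip directory entries
        file_path_array.push_back(dir_path + "/" + name);   // keep full path for imread()
    }
    closedir(dir);
}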
// run face recognition on the received image
void recognizeCb(const sensor_msgs::ImageConstPtr& msg)
{
    // cout << "entering.. recognizeCb" << endl;
    _ph.getParam("algorithm", _recognitionAlgo);

    if (_as.isPreemptRequested() || !ros::ok())
    {
        // std::cout << "preempt req or not ok" << std::endl;
        ROS_INFO("%s: Preempted", _action_name.c_str());
        // set the action state to preempted
        _as.setPreempted();
        // success = false;
        // break;
        // cout << "shutting down _image_sub" << endl;
        _image_sub.shutdown();

        Mat inactive(_window_rows, _window_cols, CV_8UC3, CV_RGB(20, 20, 20));
        appendStatusBar(inactive, "INACTIVE", "");
        cv::imshow(_windowName, inactive);
        cv::waitKey(3);
        return;
    }

    if (!_as.isActive())
    {
        // std::cout << "not active" << std::endl;
        // cout << "shutting down _image_sub" << endl;
        _image_sub.shutdown();

        Mat inactive(_window_rows, _window_cols, CV_8UC3, CV_RGB(20, 20, 20));
        appendStatusBar(inactive, "INACTIVE", "");
        cv::imshow(_windowName, inactive);
        cv::waitKey(3);
        return;
    }

    cv_bridge::CvImagePtr cv_ptr;
    try
    {
        cv_ptr = cv_bridge::toCvCopy(msg, sensor_msgs::image_encodings::BGR8);
    }
    catch (cv_bridge::Exception& e)
    {
        ROS_ERROR("%s: cv_bridge exception: %s", _action_name.c_str(), e.what());
        return;
    }

    if (_window_rows == 0)
    {
        _window_rows = cv_ptr->image.rows;
        _window_cols = cv_ptr->image.cols;
    }

    // clear previous feedback
    _feedback.names.clear();
    _feedback.confidence.clear();
    // _result.names.clear();
    // _result.confidence.clear();

    std::vector<cv::Rect> faces;
    std::vector<cv::Mat> faceImgs = _fd.getFaceImgs(cv_ptr->image, faces, true);

    std::map<string, std::pair<string, double> > results;
    for (size_t i = 0; i < faceImgs.size(); i++)
    {
        cv::resize(faceImgs[i], faceImgs[i], cv::Size(100, 100));
        cv::cvtColor(faceImgs[i], faceImgs[i], CV_BGR2GRAY);
        cv::equalizeHist(faceImgs[i], faceImgs[i]);

        // perform recognition
        results = _fr->recognize(faceImgs[i],
                                 ("eigenfaces" == _recognitionAlgo),
                                 ("fisherfaces" == _recognitionAlgo),
                                 ("lbph" == _recognitionAlgo));

        ROS_INFO("Face %lu:", i);
        if ("eigenfaces" == _recognitionAlgo)
            ROS_INFO("\tEigenfaces: %s %f", results["eigenfaces"].first.c_str(), results["eigenfaces"].second);
        if ("fisherfaces" == _recognitionAlgo)
            ROS_INFO("\tFisherfaces: %s %f", results["fisherfaces"].first.c_str(), results["fisherfaces"].second);
        if ("lbph" == _recognitionAlgo)
            ROS_INFO("\tLBPH: %s %f", results["lbph"].first.c_str(), results["lbph"].second);
    }

    // update GUI window
    // TODO check gui parameter
    appendStatusBar(cv_ptr->image, "RECOGNITION", "");
    cv::imshow(_windowName, cv_ptr->image);
    cv::waitKey(3);

    // if faces were detected
    if (faceImgs.size() > 0)
    {
        // recognize only once
        if (_goal_id == 0)
        {
            // std::cout << "goal_id 0. setting succeeded." << std::endl;
            // cout << _recognitionAlgo << endl;
            _result.names.push_back(results[_recognitionAlgo].first);
            _result.confidence.push_back(results[_recognitionAlgo].second);
            _as.setSucceeded(_result);
        }
        // recognize continuously
        else
        {
            _feedback.names.push_back(results[_recognitionAlgo].first);
            _feedback.confidence.push_back(results[_recognitionAlgo].second);
            _as.publishFeedback(_feedback);
        }
    }
}
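A minimal sketch of the subscription that drives a callback like recognizeCb, assuming a free function for brevity; in the source the callbacks are members of an action-server class and would be bound with a member pointer and this. The node name and topic below are placeholders, not taken from the source:

#include <ros/ros.h>
#include <image_transport/image_transport.h>
#include <sensor_msgs/Image.h>

// Free-function stand-in for the member callback shown above (illustrative only).
void recognizeCb(const sensor_msgs::ImageConstPtr& msg);

int main(int argc, char** argv)
{
    ros::init(argc, argv, "face_recognition_node");        // placeholder node name
    ros::NodeHandle nh;
    image_transport::ImageTransport it(nh);
    image_transport::Subscriber sub =
        it.subscribe("camera/image_raw", 1, recognizeCb);  // placeholder topic
    ros::spin();                                           // pump callbacks until shutdown
    return 0;
}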
int main(int argc, const char* argv[])
{
    signal(SIGSEGV, sig_handler); // Segmentation fault (core dump) OS signal handler
    signal(SIGABRT, sig_handler); // Aborted (core dump)

    try {
#ifdef OMP_ENABLE
        omp_set_nested(true);
#endif

        tsm::Vector< tsm::Vector<char> > *args = getArguments((char**)argv, argc);
        while (args->length < 2 || args->length > 3) {
            char arguments[200];
            printf("Please set your arguments [image model settings]: ");
            fgets(arguments, 199, stdin);
            args = getArguments(arguments);
        }

        std::string *image_path = new std::string(args->data[0].data);
        std::string *model_path = new std::string(args->data[1].data);
        std::string *settings_path = (args->length > 2) ? new std::string(args->data[2].data) : nullptr;

#ifdef TIME_PROFILE
        double readDataTime = omp_get_wtime();
#endif

        OpenCVHandler<float> imageHandler;
        imageHandler.imagePath = image_path;
        if (!imageHandler.readImage())
            throw ERROR_READ_IMAGE_CODE;

        // Read Image
        Image<uint8_t> *image = (Image<uint8_t>*) imageHandler.cv2ArrayImage();
        if (!image)
            throw ERROR_READ_IMAGE_CODE;

        // Read settings
        Settings<float> *settings = xmlReader<float>::readXMLSettings(settings_path);
        if (!settings)
            throw ERROR_READ_SETTINGS_CODE;

        // Read Model
        Model<float> *model = xmlReader<float>::readXMLModel(model_path);
        if (!model)
            throw ERROR_READ_MODEL_CODE;

#ifdef TIME_PROFILE
        readDataTime = omp_get_wtime() - readDataTime;
#endif

#ifdef TIME_PROFILE
        double detectTime = omp_get_wtime();
#endif

        // Face Detection call
        FaceDetector<float> *fdetector = new FaceDetector<float>(model, settings, image);
        if (!fdetector->detect())
            throw fdetector->errorCode;
        tsm::Vector< Box<float> > *results = fdetector->boxCache->getResults();

#ifdef TIME_PROFILE
        detectTime = omp_get_wtime() - detectTime;
#endif

#ifdef TIME_PROFILE
        cout << "TSM Face Detection System Time Profile\n";
        cout << "1. Read Data      : " << readDataTime << " sec\n";
        cout << "2. Face Detection : " << detectTime << " sec\n";
#endif

        // Return Results
        setOutput(results, settings, &imageHandler);

        delete model;
        delete image;
        delete results;
        delete settings;
    }
    catch (int code) {
#ifdef PRINT_ERRORS
        cout << "TSM System Error Code " << code << "!\n";
#endif
        return code;
    }
    catch (...) {
        cout << "TSM System Unknown Error Code!\n";
        return ERROR_OS_CODE;
    }
    return 0;
}
/*
    Function: DetectAndPreProcess
    Purpose:  Given a file and a name, try to find the face in the image (largest face)
              and save this face to disk under the given name.
    Notes:    Throws std::string if something goes wrong.
    Returns:  true if a face was found and the data saved, false if no face was found.
*/
bool DetectAndPreProcess(const char *image, const char* name)
{
    IplImage* faceImage = NULL;
    bool bRes = false;

    // try to open the given file containing the face;
    // the 1 indicates that we assume the image is color
    faceImage = cvLoadImage(image, 1);

    if (faceImage) // check the loaded image, not the file-name pointer
    {
        try {
            FaceDetector* fd = new FaceDetector(faceImage, true);
            // find the largest face in the image
            fd->Detect(true);

            IplImage *tempFace;
            // did we find the face?
            if (!fd->GetFaceVec().empty()) {
                // get the face from the face detector
                IplImage* face = fd->GetFaceVec()[0];

                // now perform the rest of the preprocessing on the face
                tempFace = cvCreateImage(cvSize(face->width, face->height), face->depth, 1);
                ConvertToGreyScale(face, tempFace);
                Resize(face, tempFace);
                // do histogram equalization on the found face
                cvEqualizeHist(tempFace, tempFace);

                // try to save it to disk
                if (!cvSaveImage(name, tempFace)) {
                    std::string err;
                    err = "Error: DetectAndPreProcess could not save ";
                    err += image;
                    err += " as ";
                    err += name;
                    throw err;
                }
                bRes = true;
            }
            delete fd;
        } catch (...) {
            throw;
        }
    } else {
        // could not open image
        std::string err;
        err = "Error: DetectAndPreProcess could not open ";
        err += image;
        throw err;
    }
    return bRes;
}
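A call site for the helper above might look like the following; the file names are placeholders and the error handling simply mirrors the documented contract (std::string thrown on failure):

#include <iostream>
#include <string>

bool DetectAndPreProcess(const char *image, const char *name); // declared above

int main()
{
    // Hypothetical usage; file names are illustrative only, not from the source.
    try {
        if (DetectAndPreProcess("group_photo.jpg", "person_face.jpg"))
            std::cout << "Face found and saved." << std::endl;
        else
            std::cout << "No face found in the image." << std::endl;
    } catch (const std::string& err) {
        std::cerr << err << std::endl;
        return 1;
    }
    return 0;
}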