// Perf test: HOG pedestrian detection on gpu/hog/road.png, comparing the
// plain CPU HOGDescriptor against the ocl:: (OpenCL) implementation.
// Detections are sorted (RectLess) so SANITY_CHECK is order-independent;
// the 1 + DBL_EPSILON tolerance permits tiny coordinate differences.
PERF_TEST(HOGFixture, HOG)
{
    Mat src = imread(getDataPath("gpu/hog/road.png"), cv::IMREAD_GRAYSCALE);
    ASSERT_TRUE(!src.empty()) << "can't open input image road.png";

    vector<cv::Rect> found_locations;
    declare.in(src).time(5);  // 5-second budget for the perf framework

    if (RUN_PLAIN_IMPL)
    {
        HOGDescriptor hog;
        hog.setSVMDetector(hog.getDefaultPeopleDetector());

        TEST_CYCLE() hog.detectMultiScale(src, found_locations);

        std::sort(found_locations.begin(), found_locations.end(), RectLess());
        SANITY_CHECK(found_locations, 1 + DBL_EPSILON);
    }
    else if (RUN_OCL_IMPL)
    {
        ocl::HOGDescriptor ocl_hog;
        ocl_hog.setSVMDetector(ocl_hog.getDefaultPeopleDetector());
        ocl::oclMat oclSrc(src);

        OCL_TEST_CYCLE() ocl_hog.detectMultiScale(oclSrc, found_locations);

        std::sort(found_locations.begin(), found_locations.end(), RectLess());
        SANITY_CHECK(found_locations, 1 + DBL_EPSILON);
    }
    else
        OCL_PERF_ELSE
}
// Loads a trained detector from obj_det_filename and runs it over either a
// video (videofilename, when non-empty) or the images globbed from test_dir,
// displaying results in a resizable window.
// NOTE(review): this definition is truncated in the visible source (the
// for-loop body continues past this chunk); code is left unchanged.
int test_trained_detector( String obj_det_filename, String test_dir, String videofilename )
{
    cout << "Testing trained detector..." << endl;
    HOGDescriptor hog;
    hog.load( obj_det_filename );

    vector< String > files;
    glob( test_dir, files );

    int delay = 0;  // 0 = wait for a key per still image; 1 = free-running video
    VideoCapture cap;
    if ( videofilename != "" )
    {
        cap.open( videofilename );
    }

    obj_det_filename = "testing " + obj_det_filename;  // reused as window title
    namedWindow( obj_det_filename, WINDOW_NORMAL );

    for( size_t i=0;; i++ )
    {
        Mat img;

        if ( cap.isOpened() )
        {
            cap >> img;
            delay = 1;
        }
        else if( i < files.size() )
void load_images(const string & filename, int label) { HOGDescriptor hog; hog.winSize = size; string line; ifstream file; vector<float> descriptors; vector<Point> locations; file.open(filename.c_str()); if (!file.is_open()) { cout << "file cannot be opened" << endl; exit(-1); } while (true) { getline(file, line); if (line == "") { break; } Mat img = imread(line.c_str(), 0); if (img.empty()) continue; resize(img, img, size); hog.compute(img, descriptors, Size(8, 8), Size(0, 0), locations); training_list.push_back(Mat(descriptors).clone()); training_label.push_back(label); img.release(); } cout << training_list.size() << endl; cout << training_label.size() << endl; }
void computeHOGs( const Size wsize, const vector< Mat > & img_lst, vector< Mat > & gradient_lst, bool use_flip ) { HOGDescriptor hog; hog.winSize = wsize; Mat gray; vector< float > descriptors; for( size_t i = 0 ; i < img_lst.size(); i++ ) { if ( img_lst[i].cols >= wsize.width && img_lst[i].rows >= wsize.height ) { Rect r = Rect(( img_lst[i].cols - wsize.width ) / 2, ( img_lst[i].rows - wsize.height ) / 2, wsize.width, wsize.height); cvtColor( img_lst[i](r), gray, COLOR_BGR2GRAY ); hog.compute( gray, descriptors, Size( 8, 8 ), Size( 0, 0 ) ); gradient_lst.push_back( Mat( descriptors ).clone() ); if ( use_flip ) { flip( gray, gray, 1 ); hog.compute( gray, descriptors, Size( 8, 8 ), Size( 0, 0 ) ); gradient_lst.push_back( Mat( descriptors ).clone() ); } } } }
// Computes the HOG descriptor of the image file `name` (resized to 120x120)
// and writes it into row `i` of the global data_mat. On the first call
// (i == 0) data_mat is allocated as nLine x descriptor-length.
// Returns 0 on success, -1 if the image cannot be read.
// Fixes: stack-allocated HOGDescriptor (was new/delete — leak on any early
// exit) and an empty-image guard (resize() on an empty Mat aborts).
int hog(string name, int i)
{
    const int ImgWidht = 120;
    const int ImgHeight = 120;
    Mat src = imread(name.c_str(), 1);  // flag 1 = force 3-channel color
    if (src.empty())
    {
        cout << "hog: cannot read image " << name << endl;
        return -1;
    }
    Mat trainImg = Mat::zeros(ImgHeight, ImgWidht, CV_8UC3);  // image to analyse
    resize(src, trainImg, cv::Size(ImgWidht, ImgHeight), 0, 0, INTER_CUBIC);
    HOGDescriptor hog(cvSize(ImgWidht, ImgHeight), cvSize(16, 16), cvSize(8, 8), cvSize(8, 8), 9);
    vector<float> descriptors;  // resulting feature vector
    hog.compute(trainImg, descriptors, Size(1, 1), Size(0, 0));
    if (i == 0)
    {
        // Allocate the global feature matrix once, sized by descriptor length.
        data_mat = Mat::zeros(nLine, (int)descriptors.size(), CV_32FC1);
    }
    // Copy the descriptor into row i of the global matrix.
    int n = 0;
    for (vector<float>::iterator iter = descriptors.begin(); iter != descriptors.end(); ++iter)
    {
        data_mat.at<float>(i, n) = *iter;
        n++;
    }
    return 0;
}
// JNI binding for org.opencv.objdetect.HOGDescriptor.compute(): unwraps the
// native Mat/HOGDescriptor handles, converts the Java locations Mat to a
// vector<Point>, runs HOGDescriptor::compute and writes the resulting float
// descriptor back into the caller-supplied Mat.
// Any C++ exception is translated into a Java exception.
JNIEXPORT void JNICALL Java_org_opencv_objdetect_HOGDescriptor_compute_10
  (JNIEnv* env, jclass , jlong self, jlong img_nativeObj, jlong descriptors_mat_nativeObj, jdouble winStride_width, jdouble winStride_height, jdouble padding_width, jdouble padding_height, jlong locations_mat_nativeObj)
{
    static const char method_name[] = "objdetect::compute_10()";
    try {
        LOGD("%s", method_name);
        vector<float> descriptors;
        Mat& descriptors_mat = *((Mat*)descriptors_mat_nativeObj);
        vector<Point> locations;
        Mat& locations_mat = *((Mat*)locations_mat_nativeObj);
        Mat_to_vector_Point( locations_mat, locations );
        HOGDescriptor* me = (HOGDescriptor*) self; //TODO: check for NULL
        Mat& img = *((Mat*)img_nativeObj);
        Size winStride((int)winStride_width, (int)winStride_height);
        Size padding((int)padding_width, (int)padding_height);
        me->compute( img, descriptors, winStride, padding, locations );
        vector_float_to_Mat( descriptors, descriptors_mat );
        return;
    } catch(const std::exception &e) {
        throwJavaException(env, &e, method_name);
    } catch (...) {
        throwJavaException(env, 0, method_name);
    }
    return;
}
// k-nearest-neighbour search using a coarse HOG descriptor (640x480 window
// with 160x120 blocks/stride/cells => a 4x4 block layout, 9 bins). The input
// image is first resized to the training-image size; the training descriptor
// matrix is loaded from HOGof44blocks.yaml when that file exists.
// NOTE(review): hogDesr is heap-allocated and never freed in the visible
// code — likely a leak; this definition is truncated in this chunk, so the
// code is left unchanged.
void kNNSearcher::kNNSearchWithHOG(const Mat &inputImage, const QStringList &imPath, Mat &indexes, Mat &weights, int k)
{
    //resize inputImage to the same size of training image
    Mat temp = imread( imPath[0].toLocal8Bit().data(), CV_LOAD_IMAGE_GRAYSCALE );
    Mat inputIm;
    resize( inputImage, inputIm, temp.size() );

    //compute the HOG descriptor of target image
    HOGDescriptor *hogDesr = new HOGDescriptor( cvSize( 640, 480 ), cvSize( 160, 120 ), cvSize( 160,120 ), cvSize( 160, 120 ), 9 );
    std::vector<float> targetDescriptor;
    hogDesr->compute( inputIm, targetDescriptor, Size( 0, 0 ), Size( 0, 0) );

    //###################################################################################
    //load the training descriptors into descriptorMat if there exist a HOGof44blocks.yaml file
    //otherwise, execute the train program
    Mat descriptorMat;
    QString const HOGMatfile = "HOGof44blocks.yaml";
    FileStorage fs;
    fs.open( HOGMatfile.toLocal8Bit().data(), FileStorage::READ );
    if( fs.isOpened() ){
        // the HOGof44blocks.yaml does exist
        fs["HOGMat"] >> descriptorMat;
    }else{
// JNI binding for org.opencv.objdetect.HOGDescriptor.detect(): unwraps the
// native handles, converts the Java search-locations Mat to vector<Point>,
// runs HOGDescriptor::detect, then writes the found window locations and
// their confidence weights back into the caller-supplied Mats.
// Any C++ exception is translated into a Java exception.
JNIEXPORT void JNICALL Java_org_opencv_objdetect_HOGDescriptor_detect_10
  (JNIEnv* env, jclass , jlong self, jlong img_nativeObj, jlong foundLocations_mat_nativeObj, jlong weights_mat_nativeObj, jdouble hitThreshold, jdouble winStride_width, jdouble winStride_height, jdouble padding_width, jdouble padding_height, jlong searchLocations_mat_nativeObj)
{
    static const char method_name[] = "objdetect::detect_10()";
    try {
        LOGD("%s", method_name);
        vector<Point> foundLocations;
        Mat& foundLocations_mat = *((Mat*)foundLocations_mat_nativeObj);
        vector<double> weights;
        Mat& weights_mat = *((Mat*)weights_mat_nativeObj);
        vector<Point> searchLocations;
        Mat& searchLocations_mat = *((Mat*)searchLocations_mat_nativeObj);
        Mat_to_vector_Point( searchLocations_mat, searchLocations );
        HOGDescriptor* me = (HOGDescriptor*) self; //TODO: check for NULL
        Mat& img = *((Mat*)img_nativeObj);
        Size winStride((int)winStride_width, (int)winStride_height);
        Size padding((int)padding_width, (int)padding_height);
        me->detect( img, foundLocations, weights, (double)hitThreshold, winStride, padding, searchLocations );
        vector_Point_to_Mat( foundLocations, foundLocations_mat );
        vector_double_to_Mat( weights, weights_mat );
        return;
    } catch(const std::exception &e) {
        throwJavaException(env, &e, method_name);
    } catch (...) {
        throwJavaException(env, 0, method_name);
    }
    return;
}
/**
 * Test the trained detector against the same training set to get an approximate idea of the detector.
 * Warning: This does not allow any statement about detection quality, as the detector might be overfitting.
 * Detector quality must be determined using an independent test set.
 * @param hog trained detector to evaluate
 * @param hitThreshold SVM decision threshold forwarded to HOGDescriptor::detect
 * @param posFileNames image files that contain the object
 * @param negFileNames image files that do not contain the object
 */
static void detectTrainingSetTest(const HOGDescriptor& hog, const double hitThreshold, const vector<string>& posFileNames, const vector<string>& negFileNames) {
    unsigned int truePositives = 0;
    unsigned int trueNegatives = 0;
    unsigned int falsePositives = 0;
    unsigned int falseNegatives = 0;
    vector<Point> foundDetection;
    // Walk over positive training samples, generate images and detect
    for (vector<string>::const_iterator posTrainingIterator = posFileNames.begin(); posTrainingIterator != posFileNames.end(); ++posTrainingIterator) {
        const Mat imageData = imread(*posTrainingIterator, 0);  // flag 0 = grayscale
        hog.detect(imageData, foundDetection, hitThreshold, winStride, trainingPadding);
        if (foundDetection.size() > 0) {
            ++truePositives;
            // NOTE(review): surplus detections on a positive sample are added
            // to falseNegatives here; arguably they are false *positives* —
            // confirm intended bookkeeping before changing.
            falseNegatives += foundDetection.size() - 1;
        } else {
            ++falseNegatives;
        }
    }
    // Walk over negative training samples, generate images and detect
    for (vector<string>::const_iterator negTrainingIterator = negFileNames.begin(); negTrainingIterator != negFileNames.end(); ++negTrainingIterator) {
        const Mat imageData = imread(*negTrainingIterator, 0);
        hog.detect(imageData, foundDetection, hitThreshold, winStride, trainingPadding);
        if (foundDetection.size() > 0) {
            falsePositives += foundDetection.size();  // every hit on a negative is false
        } else {
            ++trueNegatives;
        }
    }
    printf("Results:\n\tTrue Positives: %u\n\tTrue Negatives: %u\n\tFalse Positives: %u\n\tFalse Negatives: %u\n", truePositives, trueNegatives, falsePositives, falseNegatives);
}
// Computes the HOG feature vector of `image` with a 60x60 window, 10x10
// blocks, 5x5 block stride and cell size, and 9 orientation bins.
// Fix: the HOGDescriptor was allocated with `new` and never freed, leaking
// on every call; a stack object with identical parameters is used instead.
vector<float> Hog(Mat image)
{
    vector<float> descriptors;
    HOGDescriptor hog(cvSize(60, 60), cvSize(10, 10), cvSize(5, 5), cvSize(5, 5), 9);
    hog.compute(image, descriptors, Size(1, 1), Size(0, 0));
    return descriptors;
}
// Computes a HOG feature vector for `img`, using the whole image as the
// detection window and a 16x16 window stride. Always returns true.
bool getFeatureFromImg(Mat img, vector<float> &feature)
{
    HOGDescriptor extractor;
    extractor.winSize = Size(img.cols, img.rows);
    extractor.compute(img, feature, Size(16, 16));
    return true;
}
// Runs pedestrian detection with whichever detector the mode `m` selects.
// To get a higher hit-rate (and more false alarms), decrease hitThreshold
// and groupThreshold (groupThreshold 0 disables grouping completely).
vector<Rect> detect(InputArray img)
{
    vector<Rect> hits;
    switch (m)
    {
    case Default:
        hog.detectMultiScale(img, hits, 0, Size(8, 8), Size(32, 32), 1.05, 2, false);
        break;
    case Daimler:
        hog_d.detectMultiScale(img, hits, 0.5, Size(8, 8), Size(32, 32), 1.05, 2, true);
        break;
    default:
        break;  // unknown mode: return no detections (matches original)
    }
    return hits;
}
// Constructor: sets the particle count and seeds a GSL Mersenne-Twister RNG
// with the current time.
// NOTE(review): `region` and the locally configured `hog` (24x48 window,
// 1x2 block stride, SVM detector loaded from descriptorVectorFile) are
// local variables that go out of scope at the end of the constructor and
// have no lasting effect — possibly members were intended; confirm before
// removing this apparent dead code.
particleFilter::particleFilter()
{
    totalParticles = 300;
    gsl_rng_env_setup();
    rng = gsl_rng_alloc(gsl_rng_mt19937);
    gsl_rng_set(rng, time(NULL));
    vector<CvRect> region;
    vector<float> descriptorVector = getDescriptorVectorFromFile(descriptorVectorFile);
    HOGDescriptor hog;
    hog.winSize = Size(24, 48);
    hog.blockStride = Size(1,2);
    hog.setSVMDetector(descriptorVector);
}
// Trains a linear SVM on (data, res), then classifies every image listed in
// SVM_TEST.txt using a 120x120 HOG descriptor, appending each predicted
// label to the global res_hog and logging to SVM_PREDICT_HOG.txt.
// Fixes: one stack-allocated HOGDescriptor reused for all images (was
// new/delete per iteration), bounded snprintf (sprintf could overflow the
// 512-byte buffer on long paths), and a skip for unreadable images
// (resize() on an empty Mat aborts).
int testHOG(Mat data, Mat res)
{
    CvSVM svm;
    CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS, 1000, FLT_EPSILON);
    CvSVMParams param(CvSVM::C_SVC, CvSVM::LINEAR, 10.0, 0.1, 0.09, 100.0, 0.5, 1.0, NULL, criteria);  // for hog
    svm.train(data, res, Mat(), Mat(), param);

    const int ImgWidth = 120;
    const int ImgHeight = 120;

    // Read the list of test image paths.
    vector<string> img_tst_path;
    string buf;
    ifstream img_tst("SVM_TEST.txt");
    while (getline(img_tst, buf))
        img_tst_path.push_back(buf);
    img_tst.close();

    HOGDescriptor hog(cvSize(ImgWidth, ImgHeight), cvSize(16, 16), cvSize(8, 8), cvSize(8, 8), 9);

    Mat trainImg = Mat::zeros(ImgHeight, ImgWidth, CV_8UC3);  // image to analyse
    char line[512];
    ofstream predict_txt("SVM_PREDICT_HOG.txt");
    for (string::size_type j = 0; j != img_tst_path.size(); j++)
    {
        Mat test = imread(img_tst_path[j].c_str(), 1);  // read the test image
        if (test.empty())
        {
            printf("cannot read %s\n", img_tst_path[j].c_str());
            continue;
        }
        // All images must have the same size before feature extraction.
        resize(test, trainImg, cv::Size(ImgWidth, ImgHeight), 0, 0, INTER_CUBIC);
        vector<float> descriptors;  // resulting feature vector
        hog.compute(trainImg, descriptors, Size(1, 1), Size(0, 0));
        cout << "The Detection Result:" << endl;
        // Copy the descriptor into a 1-row matrix for prediction.
        Mat SVMtrainMat = Mat::zeros(1, (int)descriptors.size(), CV_32FC1);
        int n = 0;
        for (vector<float>::iterator iter = descriptors.begin(); iter != descriptors.end(); ++iter)
            SVMtrainMat.at<float>(0, n++) = *iter;
        int ret = (int)svm.predict(SVMtrainMat);
        res_hog.push_back(ret);
        std::snprintf(line, sizeof(line), "%s\t%d\n", img_tst_path[j].c_str(), ret);
        printf("%s %d\n", img_tst_path[j].c_str(), ret);
        predict_txt << line;
    }
    predict_txt.close();
    return 0;
}
// Returns a HOGDescriptor armed with the SVM detector loaded from the
// `trained` model file, or a default-constructed descriptor when no trained
// model path has been set.
HOGDescriptor HOGTrainer::getHOG()
{
    if (trained == "")
        return cv::HOGDescriptor();

    Ptr<SVM> svm = StatModel::load<SVM>(trained);
    HOGDescriptor detector;
    detector.winSize = size;
    vector<float> coefficients;
    getSVMDetector(svm, coefficients);
    detector.setSVMDetector(coefficients);
    return detector;
}
void test_it( const Size & size ) { char key = 27; Scalar reference( 0, 255, 0 ); Scalar trained( 0, 0, 255 ); Mat img, draw; Ptr<SVM> svm; HOGDescriptor hog; HOGDescriptor my_hog; my_hog.winSize = size; VideoCapture video; vector< Rect > locations; // Load the trained SVM. svm = StatModel::load<SVM>( "my_people_detector.yml" ); // Set the trained svm to my_hog vector< float > hog_detector; get_svm_detector( svm, hog_detector ); my_hog.setSVMDetector( hog_detector ); // Set the people detector. hog.setSVMDetector( hog.getDefaultPeopleDetector() ); // Open the camera. video.open(0); if( !video.isOpened() ) { cerr << "Unable to open the device 0" << endl; exit( -1 ); } bool end_of_process = false; while( !end_of_process ) { video >> img; if( img.empty() ) break; draw = img.clone(); locations.clear(); hog.detectMultiScale( img, locations ); draw_locations( draw, locations, reference ); locations.clear(); my_hog.detectMultiScale( img, locations ); draw_locations( draw, locations, trained ); imshow( "Video", draw ); key = (char)waitKey( 10 ); if( 27 == key ) end_of_process = true; } }
// Runs the default OpenCV people detector on `img`, removes detections that
// are fully contained in a larger one, shrinks the remaining boxes (the HOG
// detector returns rectangles slightly larger than the person) and clamps
// them to the image bounds. Returns the filtered rectangles.
// Fix: the timing printf said "tdetection time" — a lost '\t' escape.
vector<Rect> HogDetectPeople(Mat img)
{
    HOGDescriptor hog;
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
    fflush(stdout);
    vector<Rect> found, found_filtered;
    double t = (double)getTickCount();
    // run the detector with default parameters. to get a higher hit-rate
    // (and more false alarms, respectively), decrease the hitThreshold and
    // groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
    hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
    t = (double)getTickCount() - t;
    printf("\tdetection time = %gms\n", t*1000./cv::getTickFrequency());
    size_t i, j;
    // Keep only rectangles that are not nested inside another detection:
    // (r & other) == r means r is fully contained in other.
    for( i = 0; i < found.size(); i++ )
    {
        Rect r = found[i];
        for( j = 0; j < found.size(); j++ )
            if( j != i && (r & found[j]) == r)
                break;
        if( j == found.size() )
            found_filtered.push_back(r);
    }
    for( i = 0; i < found_filtered.size(); i++ )
    {
        Rect r = found_filtered[i];
        // the HOG detector returns slightly larger rectangles than the real objects.
        // so we slightly shrink the rectangles to get a nicer output.
        r.x += cvRound(r.width*0.1);
        r.width = cvRound(r.width*0.8);
        r.y += cvRound(r.height*0.07);
        r.height = cvRound(r.height*0.8);
        // Clamp the shrunk rectangle to the image bounds.
        if(r.x+r.width>img.cols-1)
        { r.x=img.cols-1-r.width;}
        if(r.x<0) r.x=0;
        if(r.y+r.height>img.rows-1) r.y=img.rows-1-r.height;
        if(r.y<0) r.y=0;
        found_filtered[i].x=r.x;
        found_filtered[i].y=r.y;
        found_filtered[i].width=r.width;
        found_filtered[i].height=r.height;
    }
    return found_filtered;
}
void HOGTrainer::testIt(const string fileName) { if (trained != "") { char key = 27; Scalar sReference(0, 255, 0); Scalar sTrained(0, 0, 255); Mat img, draw; Ptr<SVM> svm; HOGDescriptor hog; HOGDescriptor my_hog; my_hog.winSize = size; VideoCapture *video; vector<Rect> locations; // Load the sTrained SVM. svm = StatModel::load<SVM>(trained); // Set the sTrained svm to my_hog vector<float> hog_detector; getSVMDetector(svm, hog_detector); my_hog.setSVMDetector(hog_detector); // Set the people detector. hog.setSVMDetector(hog.getDefaultPeopleDetector()); // Open the camera. video = new VideoCapture(fileName); if (!video->isOpened()) { cerr << "Unable to open the device 0" << endl; exit(-1); } bool end_of_process = false; while (!end_of_process) { video->read(img); if (img.empty()) break; draw = img.clone(); locations.clear(); hog.detectMultiScale(img, locations); drawLocations(draw, locations, sReference); locations.clear(); my_hog.detectMultiScale(img, locations); drawLocations(draw, locations, sTrained); imshow("Video", draw); key = (char) waitKey(10); if (27 == key) end_of_process = true; } } }
int main (int argc, const char * argv[]) { VideoCapture cap(CV_CAP_ANY); cap.set(CV_CAP_PROP_FRAME_WIDTH, 640); cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480); if (!cap.isOpened()) return -1; Mat img; HOGDescriptor hog; hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector()); namedWindow("video capture", CV_WINDOW_AUTOSIZE); while (true) { cap >> img; if (!img.data) continue; vector<Rect> found, found_filtered; hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2); //should be able to utilize found.size() as the person count //eliminating the graphics display and plotting should //speed things up. size_t i, j; for (i=0; i<found.size(); i++) { Rect r = found[i]; for (j=0; j<found.size(); j++) if (j!=i && (r & found[j])==r) break; if (j==found.size()) found_filtered.push_back(r); } for (i=0; i<found_filtered.size(); i++) { Rect r = found_filtered[i]; r.x += cvRound(r.width*0.1); r.width = cvRound(r.width*0.8); r.y += cvRound(r.height*0.06); r.height = cvRound(r.height*0.9); rectangle(img, r.tl(), r.br(), cv::Scalar(0,255,0), 2); } imshow("video capture", img); if (waitKey(20) >= 0) break; } return 0; }
void CvPeopleDetector::Run() { HOGDescriptor hog; hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector()); // Get the new frame m_img = m_pImgProcessor->GetRGBImage(); // Clear the previously detected people m_found_filtered.clear(); std::vector<Rect> found; cv::Size winStride(8, 8); if (m_params.type == Params::SMALL_WIN) { winStride.width = 8; winStride.height = 8; } else if (m_params.type == Params::MEDIUM_WIN) { winStride.width = 16; winStride.height = 16; } else if (m_params.type == Params::LARGE_WIN) { winStride.width = 32; winStride.height = 32; } hog.detectMultiScale(m_img, found, m_params.hitThreshold, winStride, Size(32,32), m_params.scaleFactor, m_params.groupThreshold); //hog.detectMultiScale(m_img, found, 0, Size(8,8), Size(32,32), 1.05, 2); size_t i, j; for( i = 0; i < found.size(); i++ ) { Rect r = found[i]; for( j = 0; j < found.size(); j++ ) if( j != i && (r & found[j]) == r) break; if( j == found.size() ) m_found_filtered.push_back(r); } }
void testInVideo() { char key = 27; Scalar reference(0, 255, 0); Scalar trained(0, 0, 255); Mat img, draw; HOGDescriptor hog; hog.winSize = size; VideoCapture video; vector<Rect> locations; Ptr<SVM> svm = StatModel::load<SVM>("coral-detector.yml"); vector<float> hog_detector; get_svm_detector(svm, hog_detector); hog.setSVMDetector(hog_detector); string filename = "/home/modasshir/Documents/1080.mp4"; video.open(filename); if (!video.isOpened()) { cerr << "Unable to open the device 0" << endl; exit(-1); } int i = 2500; namedWindow("Video",WINDOW_OPENGL); bool end_of_process = false; while (!end_of_process) { video.set(CV_CAP_PROP_POS_FRAMES, i); video >> img; if (img.empty()) break; Size s = img.size(); resize(img,img,Size(s.width/4,s.height/4)); draw = img.clone(); hog.detectMultiScale(img, locations); draw_locations(draw, locations, reference); locations.clear(); imshow("Video", draw); key = (char) waitKey(50); if (27 == key) end_of_process = true; i = i + 15; } }
// Detects people in `frame` (optionally mirrored first), filters out
// detections fully nested inside a larger one and draws the survivors.
// Fix: found.size() is size_t — passing it to the "%d" conversion is
// undefined behavior on LP64 platforms; it is now cast to int. Loop
// indices are size_t to avoid the signed/unsigned comparison.
void detectPeople(Mat frame, bool isFlip)
{
    vector<Rect> found, found_filtered;
    // we shouldn't need to flip anything - if we always use landscape mode
    if (isFlip)
    {
        Mat flippedFrame;
        flip(frame, flippedFrame, 1);
        flippedFrame.copyTo(frame);
    }
    hog.detectMultiScale(frame, found, 0, Size(8,8), Size(32,32), 1.05, 2);
    LOGD("found %d", (int)found.size());
    for (size_t i = 0; i < found.size(); ++i)
    {
        Rect r = found[i];
        size_t j = 0;
        for (; j < found.size(); ++j)
        {
            // Rect::operator& is intersection; (r & other) == r means r is
            // fully contained inside other.
            if (j != i && (r & found[j]) == r)
            {
                break;
            }
        }
        if (j == found.size())
        {
            found_filtered.push_back(r);
        }
    }
    for (size_t i = 0; i < found_filtered.size(); ++i)
    {
        Rect r = found_filtered[i];
        rectangle(frame, r.tl(), r.br(), Scalar(255,0,0), 3);
    }
}
/*
 * === FUNCTION ======================================================================
 *         Name: consumer
 *  Description: image-processing thread: pops frames from the lock-free
 *               queue, runs HOG people detection, filters nested hits,
 *               draws the (slightly shrunk) boxes and displays the frame.
 *               Uses the globals hog, spsc_queue, showimg and the shared
 *               loop indices i, j.
 * =====================================================================================
 */
void consumer(void)
{
    while (true){
        vector<Rect> found, found_filtered;
        spsc_queue.pop(showimg);  // take the next frame produced by the capture thread
        hog.detectMultiScale(showimg, found, 0, Size(4,4), Size(0,0), 1.05, 2);
        // Drop any detection completely contained inside another one:
        // (r & other) == r means r lies fully inside other.
        for (i=0; i<found.size(); i++)
        {
            Rect r = found[i];
            for (j=0; j<found.size(); j++)
                if (j!=i && (r & found[j])==r)
                    break;
            if (j==found.size())
                found_filtered.push_back(r);
        }
        // Shrink the boxes (HOG rectangles are larger than the person) and draw.
        for (i=0; i<found_filtered.size(); i++)
        {
            Rect r = found_filtered[i];
            r.x += cvRound(r.width*0.1);
            r.width = cvRound(r.width*0.8);
            r.y += cvRound(r.height*0.06);
            r.height = cvRound(r.height*0.9);
            rectangle(showimg, r.tl(), r.br(), cv::Scalar(0,255,0), 1);
        }
        imshow("1",showimg);
        waitKey(5);
    }
}
//not using hole traffic ligh as samples,just use the square light int RecognizeLight(IplImage* srcImg,CvRect iRect) { CvSize cutSize; cutSize.width=iRect.width; cutSize.height=iRect.height; IplImage *tmpCutImg=cvCreateImage(cutSize,srcImg->depth,srcImg->nChannels); GetImageRect(srcImg,iRect,tmpCutImg); #if IS_CUTIMG cvShowImage("tmpCutImg",tmpCutImg); cvWaitKey(1); char tmpName[100]; static int ppp=0; ppp++; sprintf_s(tmpName,"ImgCut//%d.jpg",ppp); cvSaveImage(tmpName,tmpCutImg); #endif Mat cutMat(tmpCutImg); Mat tmpTLRec; vector<float> descriptor; //识别信号灯类别 resize(cutMat,tmpTLRec,Size(TLREC_WIDTH,TLREC_HEIGHT)); TLRecHOG.compute(tmpTLRec,descriptor,Size(8,8)); int DescriptorDim=descriptor.size(); Mat SVMTLRecMat(1,DescriptorDim,CV_32FC1); for(int i=0; i<DescriptorDim; i++) SVMTLRecMat.at<float>(0,i) = descriptor[i]; int result=TLRecSVM.predict(SVMTLRecMat); cvReleaseImage(&tmpCutImg); return result; }
/** * Test detection with custom HOG description vector * @param hog * @param hitThreshold threshold value for detection * @param imageData */ static void detectTest(const HOGDescriptor& hog, const double hitThreshold, Mat& imageData, vector<Rect>& found, vector<double>& weights) { //vector<Rect> found; Size padding(Size(32, 32)); Size winStride(Size(8, 8)); hog.detectMultiScale(imageData, found, weights, hitThreshold, winStride, padding, 1.05, 1); showDetections(found, imageData); }
/** * Test detection with custom HOG description vector * @param hog * @param hitThreshold threshold value for detection * @param imageData */ static void detectTest(const HOGDescriptor& hog, const double hitThreshold, Mat& imageData) { vector<Rect> found; Size padding(Size(8, 8)); Size winStride(Size(8, 8)); hog.detectMultiScale(imageData, found, hitThreshold, winStride, padding); showDetections(found, imageData); }
/** * This is the actual calculation from the (input) image data to the HOG descriptor/feature vector using the hog.compute() function * @param imageFilename file path of the image file to read and calculate feature vector from * @param descriptorVector the returned calculated feature vector<float> , * I can't comprehend why openCV implementation returns std::vector<float> instead of cv::MatExpr_<float> (e.g. Mat<float>) * @param hog HOGDescriptor containin HOG settings */ static void calculateFeaturesFromInput(const string& imageFilename, vector<float>& featureVector, HOGDescriptor& hog) { /** for imread flags from openCV documentation, * @see http://docs.opencv.org/modules/highgui/doc/reading_and_writing_images_and_video.html?highlight=imread#Mat imread(const string& filename, int flags) * @note If you get a compile-time error complaining about following line (esp. imread), * you either do not have a current openCV version (>2.0) * or the linking order is incorrect, try g++ -o openCVHogTrainer main.cpp `pkg-config --cflags --libs opencv` */ Mat imageData = imread(imageFilename, 0); if (imageData.empty()) { featureVector.clear(); printf("Error: HOG image '%s' is empty, features calculation skipped!\n", imageFilename.c_str()); return; } // hack: change dimensions //Size size(32,64); //resize(imageData, imageData, size); // Check for mismatching dimensions if (imageData.cols != hog.winSize.width || imageData.rows != hog.winSize.height) { featureVector.clear(); printf("Error: Image '%s' dimensions (%u x %u) do not match HOG window size (%u x %u)!\n", imageFilename.c_str(), imageData.cols, imageData.rows, hog.winSize.width, hog.winSize.height); return; } vector<Point> locations; hog.compute(imageData, featureVector, winStride, trainingPadding, locations); imageData.release(); // Release the image again after features are extracted }
int main(int argc, char** argv) { cout << "boost::lockfree::queue is "; if (!spsc_queue.is_lock_free()) cout << "not "; cout << "lockfree" << endl; vector<float> detector = load_lear_model(argv[1]); /* load model */ hog.setSVMDetector(detector); cap.open("d://1.avi"); //cap.open("http://10.104.5.192:8888/?action=stream?dummy=param.mjpg");//get image by mjpeg stream cap.set(CV_CAP_PROP_FRAME_WIDTH, 320); cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240); if (!cap.isOpened()) return -1; thread mThread( producer ); Sleep(5000); /* 让生产者先生产一会儿 */ thread mThread2( consumer ); mThread.join(); mThread2.join(); return 0; }
// JNI binding for org.opencv.objdetect.HOGDescriptor.getWinSigma(): unwraps
// the native handle and forwards the call. Any C++ exception is translated
// into a Java exception and 0 is returned.
JNIEXPORT jdouble JNICALL Java_org_opencv_objdetect_HOGDescriptor_getWinSigma_10
  (JNIEnv* env, jclass , jlong self)
{
    static const char method_name[] = "objdetect::getWinSigma_10()";
    try {
        LOGD("%s", method_name);
        HOGDescriptor* me = (HOGDescriptor*) self; //TODO: check for NULL
        double _retval_ = me->getWinSigma( );
        return _retval_;
    } catch(const std::exception &e) {
        throwJavaException(env, &e, method_name);
    } catch (...) {
        throwJavaException(env, 0, method_name);
    }
    return 0;
}
// Runs the default 64x128 OpenCV people detector over every frame of *vc,
// overlaying an FPS figure (Windows QueryPerformanceCounter timing) and the
// detection rectangles. ESC quits; SPACE pauses until SPACE or ESC.
void detect_hog_inria(VideoCapture *vc)
{
    // detector (64x128 template)
    HOGDescriptor hog;
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());

    // parameters
    double hit_thr = 0;
    double gr_thr = 2;

    Mat frame;
    __int64 freq,start,finish;
    ::QueryPerformanceFrequency((_LARGE_INTEGER*)&freq);
    while(1)
    {
        // input image
        *vc >> frame;
        if(frame.empty()) break;

        ::QueryPerformanceCounter((_LARGE_INTEGER*)&start);

        // detect
        vector<Rect> found;
        hog.detectMultiScale(frame, found, hit_thr, Size(8,8), Size(32,32), 1.05, gr_thr);

        // processing time (fps); the +1 avoids division by zero on very fast frames
        ::QueryPerformanceCounter((_LARGE_INTEGER*)&finish);
        double fps = freq / double(finish - start + 1);
        char fps_str[20];
        sprintf_s(fps_str, 20, "FPS: %.1lf", fps);
        putText(frame, fps_str, Point(5, 35), FONT_HERSHEY_SIMPLEX, 1., Scalar(0,255,0), 2);

        // draw results (bounding boxes)
        for(int i=0; i<(int)found.size(); i++)
            rectangle(frame, found[i], Scalar(0,255,0), 2);

        // display
        imshow("darkpgmr", frame);
        char ch = waitKey(10);
        if( ch == 27 ) break;       // ESC Key
        else if(ch == 32 )          // SPACE Key: pause until SPACE/ESC
        {
            while((ch = waitKey(10)) != 32 && ch != 27);
            if(ch == 27) break;
        }
    }
}