Example #1
File: perf_hog.cpp Project: ChrisWC/opencv
PERF_TEST(HOGFixture, HOG)
{
    Mat src = imread(getDataPath("gpu/hog/road.png"), cv::IMREAD_GRAYSCALE);
    ASSERT_TRUE(!src.empty()) << "can't open input image road.png";

    vector<cv::Rect> found_locations;
    declare.in(src).time(5);

    if (RUN_PLAIN_IMPL)
    {
        HOGDescriptor hog;
        hog.setSVMDetector(hog.getDefaultPeopleDetector());

        TEST_CYCLE() hog.detectMultiScale(src, found_locations);

        std::sort(found_locations.begin(), found_locations.end(), RectLess());
        SANITY_CHECK(found_locations, 1 + DBL_EPSILON);
    }
    else if (RUN_OCL_IMPL)
    {
        ocl::HOGDescriptor ocl_hog;
        ocl_hog.setSVMDetector(ocl_hog.getDefaultPeopleDetector());
        ocl::oclMat oclSrc(src);

        OCL_TEST_CYCLE() ocl_hog.detectMultiScale(oclSrc, found_locations);

        std::sort(found_locations.begin(), found_locations.end(), RectLess());
        SANITY_CHECK(found_locations, 1 + DBL_EPSILON);
    }
    else
        OCL_PERF_ELSE
}
Example #2
int main(int argc, char** argv)
{
        cout << "boost::lockfree::queue is ";
	if (!spsc_queue.is_lock_free())
		cout << "not ";
	cout << "lockfree" << endl;
	vector<float> detector = load_lear_model(argv[1]); /* load model */
	hog.setSVMDetector(detector);

	cap.open("d://1.avi");
	//cap.open("http://10.104.5.192:8888/?action=stream?dummy=param.mjpg");//get image by mjpeg stream
	cap.set(CV_CAP_PROP_FRAME_WIDTH, 320);
	cap.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
	if (!cap.isOpened())
		return -1;

	thread mThread( producer );
	Sleep(5000);                                /* let the producer run ahead for a while */

	thread mThread2( consumer );
	mThread.join();
	mThread2.join();

	return 0;
}
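The snippet above depends on globals defined elsewhere in the file (spsc_queue, hog, cap, producer, consumer). A minimal sketch of what those declarations might look like; the queue capacity and the producer/consumer bodies are assumptions, not the original code:

#include <boost/lockfree/spsc_queue.hpp>
#include <opencv2/opencv.hpp>
#include <thread>
using namespace cv;
using namespace std;

// single-producer/single-consumer frame queue (capacity assumed)
boost::lockfree::spsc_queue<Mat, boost::lockfree::capacity<64> > spsc_queue;
HOGDescriptor hog;
VideoCapture cap;

void producer()  // grab frames and push deep copies into the queue
{
	Mat frame;
	while (cap.read(frame))
		spsc_queue.push(frame.clone());
}

void consumer()  // pop frames and run the HOG people detector on them
{
	Mat frame;
	vector<Rect> found;
	while (true)
		if (spsc_queue.pop(frame))
			hog.detectMultiScale(frame, found);
}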
Example #3
void configureHOGDetector(HOGDescriptor& hog, string& descriptorVectorFile)
{
    vector<float> descriptorVector;
    getDescriptorVectorFromFile(descriptorVectorFile, descriptorVector);

    hog.winSize = Size(80, 80); // Default image size used = 80x80 pixels
    hog.setSVMDetector(descriptorVector);
}
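Hypothetical usage of the helper above (image and file names are ours, not from the project):

HOGDescriptor hog;
string detectorFile("descriptorvector.dat");
configureHOGDetector(hog, detectorFile);

Mat image = imread("scene.png");
vector<Rect> detections;
hog.detectMultiScale(image, detections); // scans with the configured 80x80 window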
Example #4
OCL_TEST_P(HOG, Detect)
{
    HOGDescriptor hog;
    hog.winSize = winSize;
    hog.gammaCorrection = true;

    if (winSize.width == 48 && winSize.height == 96)
        hog.setSVMDetector(hog.getDaimlerPeopleDetector());
    else
        hog.setSVMDetector(hog.getDefaultPeopleDetector());

    std::vector<Rect> cpu_found;
    std::vector<Rect> gpu_found;

    OCL_OFF(hog.detectMultiScale(img, cpu_found, 0, Size(8, 8), Size(0, 0), 1.05, 6));
    OCL_ON(hog.detectMultiScale(uimg, gpu_found, 0, Size(8, 8), Size(0, 0), 1.05, 6));

    EXPECT_LT(checkRectSimilarity(img.size(), cpu_found, gpu_found), 0.05);
}
Example #5
HOGDescriptor HOGTrainer::getHOG() {
    if (trained != "") {
        Ptr<SVM> svm = StatModel::load<SVM>(trained);
        HOGDescriptor hog;
        hog.winSize = size;
        vector<float> hogDetector;
        getSVMDetector(svm, hogDetector);
        hog.setSVMDetector(hogDetector);
        return hog;
    }
    return cv::HOGDescriptor();
}
Example #6
particleFilter::particleFilter() {
    totalParticles = 300;
    gsl_rng_env_setup();
    rng = gsl_rng_alloc(gsl_rng_mt19937);
    gsl_rng_set(rng, time(NULL));
    vector<CvRect> region;
    vector<float> descriptorVector = getDescriptorVectorFromFile(descriptorVectorFile);
    HOGDescriptor hog;
    hog.winSize = Size(24, 48);
    hog.blockStride = Size(1,2);
    hog.setSVMDetector(descriptorVector);
}
Example #7
File: HOG.cpp Project: HDLynx/sharingan
void test_it( const Size & size )
{
    char key = 27;
    Scalar reference( 0, 255, 0 );
    Scalar trained( 0, 0, 255 );
    Mat img, draw;
    Ptr<SVM> svm;
    HOGDescriptor hog;
    HOGDescriptor my_hog;
    my_hog.winSize = size;
    VideoCapture video;
    vector< Rect > locations;

    // Load the trained SVM.
    svm = StatModel::load<SVM>( "my_people_detector.yml" );
    // Set the trained svm to my_hog
    vector< float > hog_detector;
    get_svm_detector( svm, hog_detector );
    my_hog.setSVMDetector( hog_detector );
    // Set the people detector.
    hog.setSVMDetector( hog.getDefaultPeopleDetector() );
    // Open the camera.
    video.open(0);
    if( !video.isOpened() )
    {
        cerr << "Unable to open the device 0" << endl;
        exit( -1 );
    }

    bool end_of_process = false;
    while( !end_of_process )
    {
        video >> img;
        if( img.empty() )
            break;

        draw = img.clone();

        locations.clear();
        hog.detectMultiScale( img, locations );
        draw_locations( draw, locations, reference );

        locations.clear();
        my_hog.detectMultiScale( img, locations );
        draw_locations( draw, locations, trained );

        imshow( "Video", draw );
        key = (char)waitKey( 10 );
        if( 27 == key )
            end_of_process = true;
    }
}
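The get_svm_detector() helper called above is not part of this excerpt. A minimal sketch of it, modeled on OpenCV's train_HOG sample (for a linear SVM the support vectors compress to a single vector w, and the HOG detector is w followed by the bias -rho); treat this as an approximation, not the project's exact code:

void get_svm_detector( const Ptr<SVM>& svm, vector<float>& hog_detector )
{
    Mat sv = svm->getSupportVectors();               // one compressed row for a linear SVM
    Mat alpha, svidx;
    double rho = svm->getDecisionFunction( 0, alpha, svidx );
    CV_Assert( sv.rows == 1 && sv.type() == CV_32F );
    hog_detector.assign( sv.ptr<float>(), sv.ptr<float>() + sv.cols );
    hog_detector.push_back( (float)-rho );           // bias term expected by setSVMDetector()
}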
Example #8
int main (int argc, const char * argv[])
{
    VideoCapture cap(CV_CAP_ANY);
    cap.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    cap.set(CV_CAP_PROP_FRAME_HEIGHT, 480);    
    if (!cap.isOpened())
        return -1;
 
    Mat img;
    HOGDescriptor hog;
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
 
    namedWindow("video capture", CV_WINDOW_AUTOSIZE);
    while (true)
    {
        cap >> img;
        if (!img.data)
            continue;
 
        vector<Rect> found, found_filtered;
        hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
 
        // should be able to utilize found.size() as the person count;
        // eliminating the graphics display and plotting should
        // speed things up.
 
        size_t i, j;
        for (i=0; i<found.size(); i++)
        {
            Rect r = found[i];
            for (j=0; j<found.size(); j++)
                if (j!=i && (r & found[j])==r)
                    break;
            if (j==found.size())
                found_filtered.push_back(r);
        }
        for (i=0; i<found_filtered.size(); i++)
        {
            Rect r = found_filtered[i];
            r.x += cvRound(r.width*0.1);
            r.width = cvRound(r.width*0.8);
            r.y += cvRound(r.height*0.06);
            r.height = cvRound(r.height*0.9);
            rectangle(img, r.tl(), r.br(), cv::Scalar(0,255,0), 2);
        }
        imshow("video capture", img);
        if (waitKey(20) >= 0)
            break;
    }
    return 0;
}
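A note on the nested loops above, since the same idiom recurs in several of the following examples: (r & found[j]) == r tests whether the intersection of r with found[j] equals r itself, i.e. whether r lies entirely inside another detection; only rectangles not contained in any other detection survive into found_filtered. Extracted as a standalone helper (the name is ours), the filter looks like this:

// keep only detections that are not fully contained in another detection
static vector<Rect> dropNestedRects(const vector<Rect>& found)
{
    vector<Rect> filtered;
    for (size_t i = 0; i < found.size(); i++)
    {
        size_t j;
        for (j = 0; j < found.size(); j++)
            if (j != i && (found[i] & found[j]) == found[i])
                break;              // found[i] lies inside found[j]; drop it
        if (j == found.size())
            filtered.push_back(found[i]);
    }
    return filtered;
}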
Example #9
void HOGTrainer::testIt(const string fileName) {
    if (trained != "") {
        char key = 27;
        Scalar sReference(0, 255, 0);
        Scalar sTrained(0, 0, 255);
        Mat img, draw;
        Ptr<SVM> svm;
        HOGDescriptor hog;
        HOGDescriptor my_hog;
        my_hog.winSize = size;
        VideoCapture *video;
        vector<Rect> locations;

        // Load the sTrained SVM.
        svm = StatModel::load<SVM>(trained);
        // Set the sTrained svm to my_hog
        vector<float> hog_detector;
        getSVMDetector(svm, hog_detector);
        my_hog.setSVMDetector(hog_detector);
        // Set the people detector.
        hog.setSVMDetector(hog.getDefaultPeopleDetector());
        // Open the camera.
        video = new VideoCapture(fileName);
        if (!video->isOpened()) {
            cerr << "Unable to open the device 0" << endl;
            exit(-1);
        }

        bool end_of_process = false;
        while (!end_of_process) {
            video->read(img);
            if (img.empty())
                break;

            draw = img.clone();

            locations.clear();
            hog.detectMultiScale(img, locations);
            drawLocations(draw, locations, sReference);

            locations.clear();
            my_hog.detectMultiScale(img, locations);
            drawLocations(draw, locations, sTrained);

            imshow("Video", draw);
            key = (char) waitKey(10);
            if (27 == key)
                end_of_process = true;
        }
    }
}
Example #10
vector<Rect> HogDetectPeople(Mat img)
{
    HOGDescriptor hog;
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());

    fflush(stdout);
    vector<Rect> found, found_filtered;
    double t = (double)getTickCount();
    // run the detector with default parameters. to get a higher hit-rate
    // (and more false alarms, respectively), decrease the hitThreshold and
    // groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
    hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
    t = (double)getTickCount() - t;
    printf("detection time = %gms\n", t*1000./cv::getTickFrequency());
    size_t i, j;
    for( i = 0; i < found.size(); i++ )
    {
        Rect r = found[i];
        for( j = 0; j < found.size(); j++ )
            if( j != i && (r & found[j]) == r)
                break;
        if( j == found.size() )
            found_filtered.push_back(r);
    }
    for( i = 0; i < found_filtered.size(); i++ )
    {
        Rect r = found_filtered[i];
        // the HOG detector returns slightly larger rectangles than the real objects,
        // so we slightly shrink the rectangles to get a nicer output.
        r.x += cvRound(r.width*0.1);
        r.width = cvRound(r.width*0.8);
        r.y += cvRound(r.height*0.07);
        r.height = cvRound(r.height*0.8);
        // clamp the shrunk rectangle to the image bounds
        if (r.x + r.width > img.cols - 1)
            r.x = img.cols - 1 - r.width;
        if (r.x < 0)
            r.x = 0;
        if (r.y + r.height > img.rows - 1)
            r.y = img.rows - 1 - r.height;
        if (r.y < 0)
            r.y = 0;
        found_filtered[i] = r;

        // rectangle(img, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
    }
    return found_filtered;
}
Example #11
void CvPeopleDetector::Run()
{
	HOGDescriptor hog;

    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());

	// Get the new frame
	m_img = m_pImgProcessor->GetRGBImage();

	// Clear the previously detected people
	m_found_filtered.clear();
		
	std::vector<Rect> found;

	cv::Size winStride(8, 8);

	if (m_params.type == Params::SMALL_WIN)
	{
		winStride.width = 8;
		winStride.height = 8;
	}
	else if (m_params.type == Params::MEDIUM_WIN)
	{
		winStride.width = 16;
		winStride.height = 16;
	}
	else if (m_params.type == Params::LARGE_WIN)
	{
		winStride.width = 32;
		winStride.height = 32;
	}

	hog.detectMultiScale(m_img, found, m_params.hitThreshold, 
			winStride, Size(32,32), m_params.scaleFactor, m_params.groupThreshold);

	//hog.detectMultiScale(m_img, found, 0, Size(8,8), Size(32,32), 1.05, 2);

	size_t i, j;

	for( i = 0; i < found.size(); i++ )
	{
		Rect r = found[i];

		for( j = 0; j < found.size(); j++ )
			if( j != i && (r & found[j]) == r)
				break;

		if( j == found.size() )
			m_found_filtered.push_back(r);
	}
}
Example #12
// Call back Declaration
ReturnType HumanDetection::onInitialize()
{
	if(m_orig_img != NULL)
		m_orig_img = NULL;
	if(m_result_img != NULL)
		m_result_img = NULL;

	m_in_width = 0;
	m_in_height = 0;

	m_hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());

	return OPROS_SUCCESS;
}
Example #13
File: main.cpp Project: Modasshir/paper
void testInVideo() {
	char key = 27;
	Scalar reference(0, 255, 0);
	Scalar trained(0, 0, 255);
	Mat img, draw;
	HOGDescriptor hog;
	hog.winSize = size;
	VideoCapture video;
	vector<Rect> locations;
	Ptr<SVM> svm = StatModel::load<SVM>("coral-detector.yml");
	vector<float> hog_detector;
	get_svm_detector(svm, hog_detector);
	hog.setSVMDetector(hog_detector);
	string filename = "/home/modasshir/Documents/1080.mp4";

	video.open(filename);
	if (!video.isOpened()) {
		cerr << "Unable to open the device 0" << endl;
		exit(-1);
	}
	int i = 2500;

	namedWindow("Video",WINDOW_OPENGL);
	bool end_of_process = false;
	while (!end_of_process) {
		video.set(CV_CAP_PROP_POS_FRAMES, i);
		video >> img;
		if (img.empty())
			break;

		Size s = img.size();
		resize(img,img,Size(s.width/4,s.height/4));

		draw = img.clone();


		hog.detectMultiScale(img, locations);
		draw_locations(draw, locations, reference);

		locations.clear();

		imshow("Video", draw);
		key = (char) waitKey(50);
		if (27 == key)
			end_of_process = true;
		i = i + 15;
	}

}
Example #14
void detect_hog_inria(VideoCapture *vc)
{
	// detector (64x128 template)
    HOGDescriptor hog;
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());

	// parameters
	double hit_thr = 0;
	double gr_thr = 2;

	Mat frame;
	__int64 freq,start,finish;
	::QueryPerformanceFrequency((_LARGE_INTEGER*)&freq);
	while(1)
	{
		// input image
		*vc >> frame;
		if(frame.empty()) break;

		::QueryPerformanceCounter((_LARGE_INTEGER*)&start);

		// detect
		vector<Rect> found;
        hog.detectMultiScale(frame, found, hit_thr, Size(8,8), Size(32,32), 1.05, gr_thr);

		// processing time (fps)
		::QueryPerformanceCounter((_LARGE_INTEGER*)&finish);
		double fps = freq / double(finish - start + 1);
		char fps_str[20];
		sprintf_s(fps_str, 20, "FPS: %.1lf", fps);
		putText(frame, fps_str, Point(5, 35), FONT_HERSHEY_SIMPLEX, 1., Scalar(0,255,0), 2);

		// draw results (bounding boxes)
		for(int i=0; i<(int)found.size(); i++)
			rectangle(frame, found[i], Scalar(0,255,0), 2);

		// display
		imshow("darkpgmr", frame);
		char ch = waitKey(10);
		if( ch == 27 ) break;				// ESC Key
		else if(ch == 32 )					// SPACE Key
		{
			while((ch = waitKey(10)) != 32 && ch != 27);
			if(ch == 27) break;
		}
	}
}
Example #15
OCL_PERF_TEST(HOGFixture, HOG)
{
    UMat src;
    imread(getDataPath("gpu/hog/road.png"), cv::IMREAD_GRAYSCALE).copyTo(src);
    ASSERT_FALSE(src.empty());

    vector<cv::Rect> found_locations;
    declare.in(src);

    HOGDescriptor hog;
    hog.setSVMDetector(hog.getDefaultPeopleDetector());

    OCL_TEST_CYCLE() hog.detectMultiScale(src, found_locations);

    std::sort(found_locations.begin(), found_locations.end(), RectLess());
    SANITY_CHECK(found_locations, 3);
}
Example #16
OCL_TEST_P(HOG, GetDescriptors)
{
    HOGDescriptor hog;
    hog.gammaCorrection = true;

    hog.setSVMDetector(hog.getDefaultPeopleDetector());

    std::vector<float> cpu_descriptors;
    std::vector<float> gpu_descriptors;

    OCL_OFF(hog.compute(img, cpu_descriptors, hog.winSize));
    OCL_ON(hog.compute(uimg, gpu_descriptors, hog.winSize));

    Mat cpu_desc(cpu_descriptors), gpu_desc(gpu_descriptors);

    EXPECT_MAT_SIMILAR(cpu_desc, gpu_desc, 1e-1);
}
Example #17
JNIEXPORT void JNICALL Java_org_opencv_objdetect_HOGDescriptor_setSVMDetector_10
  (JNIEnv* env, jclass , jlong self, jlong _svmdetector_nativeObj)
{
    static const char method_name[] = "objdetect::setSVMDetector_10()";
    try {
        LOGD("%s", method_name);
        HOGDescriptor* me = (HOGDescriptor*) self; //TODO: check for NULL
        Mat& _svmdetector = *((Mat*)_svmdetector_nativeObj);
        me->setSVMDetector( _svmdetector );
        return;
    } catch(const std::exception &e) {
        throwJavaException(env, &e, method_name);
    } catch (...) {
        throwJavaException(env, 0, method_name);
    }
    return;
}
Example #18
vector<Rect> detectPeople(Mat frame) {
  HOGDescriptor hog;
  vector<Rect> found, filtered;
  size_t i, j;

  hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
  hog.detectMultiScale(
    frame, 
    found, 
    0, 
    Size(8,8), 
    Size(32,32), 
    1.05, 
    2
  );

  for(i = 0; i < found.size(); i++){
    Rect r = found[i];
    
    for(j = 0; j < found.size(); j++) 
      if(j != i && (r & found[j]) == r)
        break;
    
    if(j == found.size())
      filtered.push_back(r);
  }

  for(i = 0; i < filtered.size(); i++){
    Rect r = filtered[i];
    
    r.x += cvRound(r.width*0.1);
    r.width = cvRound(r.width*0.8);
    r.y += cvRound(r.height*0.07);
    r.height = cvRound(r.height*0.8);
    
    rectangle(frame, r.tl(), r.br(), Scalar(0,255,0), 1);        
  }

  cout << "--------------------------------------------" << endl;
  cout << "function: detectPeople" << endl;
  cout << "--------------------------------------------" << endl;

  return filtered;
}
Example #19
void classify()
{
    HOGDescriptor hog;
    hog.winSize = Size(sampleSize);
    SVMLight::SVMClassifier c("classifier.dat");
    std::vector<float> descriptorVector = c.getDescriptorVector();
    std::cout << descriptorVector.size() << std::endl;
    hog.setSVMDetector(descriptorVector);

    Mat m = imread("/Users/alberto/tmp/samples/fullframe9.png");
    Mat m1 = m.clone();

    std::vector<Rect> found;
    std::vector<Point> foundPoint;
    Size padding(Size(0, 0));

    std::cout << "try to detect.." << std::endl;
    //hog.detectMultiScale(m, found, 0.0, winStride, padding, 1.01, 0.1);
    hog.detect(m, foundPoint, 0.0, winStride, padding);
    std::cout << "found: " << foundPoint.size() << std::endl;


    for(int i=0; i<foundPoint.size(); ++i) {
        Rect r;
        r.x = foundPoint[i].x;
        r.y = foundPoint[i].y;
        r.width = 48;
        r.height = 48;
        rectangle(m, r, Scalar(255,255,255));

        Mat imageroi = m1(r);
        std::stringstream ss;
        ss << "/Users/alberto/tmp/samples/tmp/test";
        ss << i;
        ss << ".png";
        cv::imwrite(ss.str(), imageroi);

    }

    imshow("result", m);
}
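winStride and sampleSize are defined elsewhere in this project. Judging from the hard-coded 48x48 boxes drawn above, plausible (assumed) definitions would be:

const Size sampleSize(48, 48); // training window size, inferred from the 48x48 boxes
const Size winStride(8, 8);    // scan step passed to hog.detect (assumed)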
Example #20
GaussianMixture* HogPeopleDetector::process(int offsetX=0, int offsetY=0, double scale=1) {

    detectedROI.clear();
    detectedCENTER.clear();
    detectedSCALES.clear();
    detectedIDS.clear();

    //gets the detections
    vector<cv::Rect> rois;
    vector<cv::Point> points;
    cv::gpu::HOGDescriptor hogDesc; // the detector (GPU variant, since the input below is a GpuMat)
    hogDesc.setSVMDetector(cv::gpu::HOGDescriptor::getDefaultPeopleDetector()); // we use a people detector

    //cv::Mat m (this->imgBank->imgSRC);
    cv::gpu::GpuMat gpuMat(this->imgBank->imgSRC);

    hogDesc.detectMultiScale(gpuMat, rois, 0, Size(8, 8),
                             Size(32,32), 1.05, 2);

    //fills the gaussian mixture with detections
    for(int i = 0 ; i < rois.size() ; i++) {
        cv::Rect r = rois[i];
        detectedROI.push_back(r);
        cv::Point p;
        p.x = r.x + r.width*0.5;
        p.y = r.y + r.height*0.5;
        detectedCENTER.push_back(p);
        this->gm->glist[i]->mean[0] = p.x;
        this->gm->glist[i]->mean[1] = p.y;
    }
    this->nbdetected = rois.size();

    gm->curnb = rois.size();
    return gm;

}
Example #21
void initializeHog() {
	LOGD("initialize hog");
	hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
}
Example #22
int CVHOGDetector::main(int argc, char** argv)
{
    string filename;
    string savefilename;
    if( argc == 1 )
    {
        printf("Usage: peopledetect video_filename\n");
        //        filename="/mnt/hgfs/I/Win7/dataset/大王卡口全景东向西_大王卡口全景东向西/大王卡口全景东向西_大王卡口全景东向西_20150318090325.mp4";
        filename="/media/yzbx/15311446439/测试视频/12.wmv";
    }
    else if(argc ==2){
        filename=argv[1];
    }
    else{
        filename=argv[1];
        savefilename=argv[2];
    }
    VideoCapture cap(filename);
    if(!cap.isOpened()){
        std::cout<<"cannot open video file "<<filename<<endl;
    }

    HOGDescriptor hog;
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
    namedWindow("people detector", 1);
    cv::VideoWriter videoWriter;
    bool firstTimeToOpen=true;
    for(;;)
    {
        Mat img;
        cap>>img;

        if(!img.data){
            std::cout<<"empty image"<<endl;
            break;
        }
        //        cv::resize(img,img,Size(0,0),0.5,0.5);

        vector<Rect> found, found_filtered;
        double t = (double)getTickCount();
        // run the detector with default parameters. to get a higher hit-rate
        // (and more false alarms, respectively), decrease the hitThreshold and
        // groupThreshold (set groupThreshold to 0 to turn off the grouping completely).
        hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
        t = (double)getTickCount() - t;
        printf("tdetection time = %gms\n", t*1000./cv::getTickFrequency());
        size_t i, j;
        for( i = 0; i < found.size(); i++ )
        {
            Rect r = found[i];
            for( j = 0; j < found.size(); j++ )
                if( j != i && (r & found[j]) == r)
                    break;
            if( j == found.size() )
                found_filtered.push_back(r);
        }
        for( i = 0; i < found_filtered.size(); i++ )
        {
            Rect r = found_filtered[i];
            // the HOG detector returns slightly larger rectangles than the real objects.
            // so we slightly shrink the rectangles to get a nicer output.
            r.x += cvRound(r.width*0.1);
            r.width = cvRound(r.width*0.8);
            r.y += cvRound(r.height*0.07);
            r.height = cvRound(r.height*0.8);
            rectangle(img, r.tl(), r.br(), cv::Scalar(0,255,0), 3);
        }

        imshow("people detector", img);
        int c = waitKey(30) & 255;
        if(argc>2){
            if(firstTimeToOpen){
                cv::Size s=img.size();
                // keep the open() call outside assert(): assert bodies vanish under NDEBUG
                bool opened = videoWriter.open(savefilename,CV_FOURCC('D', 'I', 'V', 'X'),50,s,true);
                assert(opened);
                firstTimeToOpen=false;
            }

            assert(videoWriter.isOpened());
            videoWriter<<img;
        }
        else{
            if( c == 'q' || c == 'Q')
                break;
        }
    }

    return 0;
}
Example #23
int main (int argc, const char * argv[])
{
    VideoCapture cap2(0);
    cap2.set(CV_CAP_PROP_FRAME_WIDTH, 320);
    cap2.set(CV_CAP_PROP_FRAME_HEIGHT, 240);
 
	bool isOnline=true;

	if (isOnline)
		argv[1] = "test.avi";

	cv::VideoCapture cap(argv[1]);
	if (!isOnline)
		cap = cap2;
	if (!cap.isOpened())
	{
		printf("Failed to open file....");
		return -1;
	}
 
    Mat img;
    namedWindow("opencv", CV_WINDOW_AUTOSIZE);
    HOGDescriptor hog;
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
	int frameskip=0;
	int detection=0;
    while (true)
    {
		frameskip+=10;
		int people_in_the_back=frameskip%35;
		cap.set(CV_CAP_PROP_POS_FRAMES, frameskip);
        cap >> img;
		

		if (img.empty())
            continue;
 
        vector<Rect> found, found_filtered;
        hog.detectMultiScale(img, found, 0, Size(4,4), Size(16,16), 1.05, 3);
        size_t i, j;
		if(frameskip<40)
		detection =found.size()*4+7;
		else
			detection=found.size()*4+people_in_the_back;
		printf("\n Frame Number =%d People= %d",frameskip,detection);
        for (i=0; i<found.size(); i++) 
        {
            Rect r = found[i];
            for (j=0; j<found.size(); j++) 
                if (j!=i && (r & found[j]) == r)
                    break;
            if (j== found.size())
                found_filtered.push_back(r);
        }
 
        for (i=0; i<found_filtered.size(); i++)
        {
            Rect r = found_filtered[i];
            r.x += cvRound(r.width*0.1);
            r.width = cvRound(r.width*0.8);
            r.y += cvRound(r.height*0.07);
            r.height = cvRound(r.height*0.8);
            rectangle(img, r.tl(), r.br(), Scalar(0,255,0), 3);
        }
 
        imshow("opencv", img);
        if (waitKey(5)>=0)
            break;
    }
    return 0;
}
Example #24
/**
 * Main program entry point
 * @param argc unused
 * @param argv unused
 * @return EXIT_SUCCESS (0) or EXIT_FAILURE (1)
 */
int main(int argc, char** argv) {

    // <editor-fold defaultstate="collapsed" desc="Init">
    HOGDescriptor hog; // Use standard parameters here
    hog.winSize = Size(104, 40); // Default training images size as used in paper
    // Get the files to train from somewhere
    static vector<string> positiveTrainingImages;
    static vector<string> negativeTrainingImages;
    static vector<string> validExtensions;
    validExtensions.push_back("jpg");
    validExtensions.push_back("png");
    validExtensions.push_back("ppm");
    validExtensions.push_back("pgm");
    // </editor-fold>

    // <editor-fold defaultstate="collapsed" desc="Read image files">
    getFilesInDirectory(posSamplesDir, positiveTrainingImages, validExtensions);
    getFilesInDirectory(negSamplesDir, negativeTrainingImages, validExtensions);
    /// Retrieve the descriptor vectors from the samples
    unsigned long overallSamples = positiveTrainingImages.size() + negativeTrainingImages.size();
    // </editor-fold>
    
    // <editor-fold defaultstate="collapsed" desc="Calculate HOG features and save to file">
    // Make sure there are actually samples to train
    if (overallSamples == 0) {
        printf("No training sample files found, nothing to do!\n");
        return EXIT_SUCCESS;
    }

    /// @WARNING: This is really important: some libraries (e.g. ROS) seem to set the system locale, which uses decimal commas instead of points and causes the file input parsing to fail
    setlocale(LC_ALL, "C"); // Do not use the system locale
    setlocale(LC_NUMERIC,"C");
    setlocale(LC_ALL, "POSIX");

    printf("Reading files, generating HOG features and save them to file '%s':\n", featuresFile.c_str());
    float percent;
    /**
     * Save the calculated descriptor vectors to a file in a format that can be used by SVMlight for training
     * @NOTE: If you split this into separate steps:
     * 1. calculating features into memory (e.g. into a cv::Mat or vector< vector<float> >), 
     * 2. saving features to file / directly inject from memory to machine learning algorithm,
     * the program may consume a considerable amount of main memory
     */ 
    fstream File;
    File.open(featuresFile.c_str(), ios::out);
    if (File.good() && File.is_open()) {
		// Remove following line for libsvm which does not support comments
        // File << "# Use this file to train, e.g. SVMlight by issuing $ svm_learn -i 1 -a weights.txt " << featuresFile.c_str() << endl;
        // Iterate over sample images
        for (unsigned long currentFile = 0; currentFile < overallSamples; ++currentFile) {
            storeCursor();
            vector<float> featureVector;
            // Get positive or negative sample image file path
            const string currentImageFile = (currentFile < positiveTrainingImages.size() ? positiveTrainingImages.at(currentFile) : negativeTrainingImages.at(currentFile - positiveTrainingImages.size()));
            // Output progress
            if ( (currentFile+1) % 10 == 0 || (currentFile+1) == overallSamples ) {
                percent = ((currentFile+1) * 100 / overallSamples);
                printf("%5lu (%3.0f%%):\tFile '%s'", (currentFile+1), percent, currentImageFile.c_str());
                fflush(stdout);
                resetCursor();
            }
            // Calculate feature vector from current image file
            calculateFeaturesFromInput(currentImageFile, featureVector, hog);
            if (!featureVector.empty()) {
                /* Put positive or negative sample class to file, 
                 * true=positive, false=negative, 
                 * and convert positive class to +1 and negative class to -1 for SVMlight
                 */
                File << ((currentFile < positiveTrainingImages.size()) ? "+1" : "-1");
                // Save feature vector components
                for (unsigned int feature = 0; feature < featureVector.size(); ++feature) {
                    File << " " << (feature + 1) << ":" << featureVector.at(feature);
                }
                File << endl;
            }
        }
        printf("\n");
        File.flush();
        File.close();
    } else {
        printf("Error opening file '%s'!\n", featuresFile.c_str());
        return EXIT_FAILURE;
    }
    // </editor-fold>
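    // For reference, each line written above follows the SVMlight input format
    // "<class> <index>:<value> ...", with 1-based feature indices, e.g. (values
    // illustrative only):
    //   +1 1:0.013 2:0.208 3:0.174 ...
    //   -1 1:0.095 2:0.002 3:0.311 ...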

    // <editor-fold defaultstate="collapsed" desc="Pass features to machine learning algorithm">
    /// Read in and train the calculated feature vectors
    printf("Calling %s\n", TRAINHOG_SVM_TO_TRAIN::getInstance()->getSVMName());
    TRAINHOG_SVM_TO_TRAIN::getInstance()->read_problem(const_cast<char*> (featuresFile.c_str()));
    TRAINHOG_SVM_TO_TRAIN::getInstance()->train(); // Call the core libsvm training procedure
    printf("Training done, saving model file!\n");
    TRAINHOG_SVM_TO_TRAIN::getInstance()->saveModelToFile(svmModelFile);
    // </editor-fold>

    // <editor-fold defaultstate="collapsed" desc="Generate single detecting feature vector from calculated SVM support vectors and SVM model">
    printf("Generating representative single HOG feature vector using svmlight!\n");
    vector<float> descriptorVector;
    vector<unsigned int> descriptorVectorIndices;
    // Generate a single detecting feature vector (v1 | b) from the trained support vectors, for use e.g. with the HOG algorithm
    TRAINHOG_SVM_TO_TRAIN::getInstance()->getSingleDetectingVector(descriptorVector, descriptorVectorIndices);
    // And save the precious to file system
    saveDescriptorVectorToFile(descriptorVector, descriptorVectorIndices, descriptorVectorFile);
    // </editor-fold>

    // <editor-fold defaultstate="collapsed" desc="Test detecting vector">
    // Detector detection tolerance threshold
    const double hitThreshold = TRAINHOG_SVM_TO_TRAIN::getInstance()->getThreshold();
    // Set our custom detecting vector
    hog.setSVMDetector(descriptorVector);
    
    printf("Testing training phase using training set as test set (just to check if training is ok - no detection quality conclusion with this!)\n");
    detectTrainingSetTest(hog, hitThreshold, positiveTrainingImages, negativeTrainingImages);
    
    /*
    printf("Testing custom detection using camera\n");
    VideoCapture cap(0); // open the default camera
    if(!cap.isOpened()) { // check if we succeeded
        printf("Error opening camera!\n");
        return EXIT_FAILURE;
    }*/
    
    Mat testImage;
    int num_of_images = 170;
    
    
    /***** Hard-coded values and filenames ********/
    
    FILE* fp = fopen("boundingboxesgrp1.txt", "w");
    for(int i=0; i < num_of_images; i++)
    {
        char filename[50];
        char filename2[50];

        vector<Rect> boxes;
        vector<double> weights;

        sprintf(filename, "TestImages/test-%d.pgm", i);
        testImage = imread(filename, 0);
        //printf("%d\n", i);
        detectTest(hog, hitThreshold, testImage, boxes, weights);
        //hitThreshold = 0;
        //detectTest(hog, 0, testImage, boxes, weights);
        printf("BOXES: %ld \t WEIGHTS: %ld\n", boxes.size(), weights.size());
        sprintf(filename2, "results/image-%d.pgm", i);
        imwrite(filename2, testImage);
        for(int j=0; j < boxes.size(); j++)
        {
            fprintf(fp, "%s %d %d %d %d %f\n", filename, boxes[j].x, boxes[j].y, boxes[j].width, boxes[j].height, weights[j]);
        }
        printf("Threshold: %f\n", hitThreshold);
    }
    fclose(fp);
    
    return EXIT_SUCCESS;
}
Example #25
File: DetectCode.cpp Project: hfr1988/DEye
void test_video(const Size & size)
{
	char key = 27;
	Mat img, draw;
	Ptr<SVM> svm;
	HOGDescriptor hog;
	hog.winSize = size;
	vector< Rect > locations;
	vector< Rect > found_filtered;

	// Load the trained SVM.
	svm = StatModel::load<SVM>(TRAINED_SVM);
	// Set the trained svm to my_hog
	vector< float > hog_detector;
	get_svm_detector(svm, hog_detector);
	hog.setSVMDetector(hog_detector);
	printHOGParams(hog);

	VideoCapture video;
	// Open the video file.
	video.open(TRAFFIC_VIDEO_FILE);
	if (!video.isOpened())
	{
		cerr << "Unable to open the device" << endl;
		exit(-1);
	}
	// Get the frame rate
	double rate = video.get(CV_CAP_PROP_FPS);
	cout << " Frame rate : " << rate << endl;
	cout << " Input video codec :" << video.get(CV_CAP_PROP_FOURCC);
	// initialize the video writer object to write the video output
	std::string outputFile(OUT_Video_File);
	VideoWriter writer;
	int codec = static_cast<int>(video.get(CV_CAP_PROP_FOURCC));
	//int codec = CV_FOURCC('M', 'J', 'P', 'G');
	bool isWriterInitialized = false;

	int num_of_vehicles = 0;
	bool end_of_process = false;
	while (!end_of_process)
	{
		video >> img;
		if (img.empty())
			break;


		draw = img.clone();
		Mat cropped;
		cv::resize(draw, cropped, Size(720, 560));

		Mat temp, temp3;
		cvtColor(cropped, temp, COLOR_BGR2GRAY);
		/*Mat bgr[3];   //destination array
		split(temp3,bgr);//split source
		temp = bgr[0]+bgr[2];
		*/
		if (!isWriterInitialized) {
			// execute only once
			isWriterInitialized = true;
			/*writer.open(outputFile,
			capture.get(CV_CAP_PROP_FOURCC),
			capture.get(CV_CAP_PROP_FPS),
			Size(capture.get(CV_CAP_PROP_FRAME_WIDTH),capture.get(CV_CAP_PROP_FRAME_HEIGHT)),
			true);*/
			writer.open(outputFile, codec, rate, cropped.size(), true);
		}


		locations.clear();
		// Rect(x,y,w,h) w->width=cols;h->rows
		// first remove the upper 50% from height  Original Cropped =size(720,560)=(cols,rows)
		Mat roi = temp(Rect(0, temp.rows*0.5, temp.cols, temp.rows - temp.rows*0.5));
		//size(roi) = size(720,280)
		//cout<<"roi.size() = "<<roi.size()<<endl;
		int y_offset = temp.rows*0.5;
		//again crop the lower 10 % to remove the images near dashboard-> remove false positives
		roi = roi(Rect(0, 0, roi.cols, roi.rows - 100));
		//cout<<"roi.size() = "<<roi.size()<<endl;
		//no offset required as this is the lower row colums.

		//hog.detectMultiScale(roi, locations);
		//hog.detectMultiScale(roi, locations, 1, Size(50, 50), Size(32, 32), 1, 2); // multi-scale pedestrian detection on the image

		hog.detectMultiScale(roi, locations, 0.00, Size(4, 8), Size(0, 0), 1.05, 2); // fewer false positives
		//hog.detectMultiScale(roi, locations, 0.00, Size(8,8), Size(0,0), 1.05, 2); // fewer misses
		// add the offset back when mapping detections into the full frame



		std::vector<Rect>::iterator it = locations.begin();
		std::vector<Rect>::iterator itend = locations.end();
		vector<Rect> actuallocations;
		bool isVehicle = false;
		for (; it != itend; it++)
		{
			Rect current = *it;
			//cout<<" Rect current = "<< current<<endl;
			//cout<<" roi size= "<<roi.size()<<endl;
			Mat roi2Check = roi(Rect(current.x, current.y, current.width, current.height)); // define a roi of 50x50
			//cout<<" roi2Check size= "<<roi2Check.size()<<endl;
			isVehicle = checkIfpatchIsVehicle(roi2Check);
			if (isVehicle)
				actuallocations.push_back(Rect(current.x, current.y + y_offset, current.width, current.height));
		}
		if (0 != actuallocations.size())
			draw_locations(cropped, actuallocations, Scalar(0, 255, 0));

		imshow(WINDOW_NAME, cropped);

		if (save_video)
			writer.write(cropped);
		// wait up to 3 ms for a keypress
		key = (char)waitKey(3);
		if (27 == key)
			end_of_process = true;
	}
	// Close the video file.
	// Not required since called by destructor
	writer.release();
	video.release();
}
Example #26
File: main.cpp Project: bmichell/FragilIT
int main(int argc, char** argv)
{
    VideoCapture cap(argv[1]); // open the default camera
    if(!cap.isOpened())  // check if we succeeded
    {
        return -1;
    }
    HOGDescriptor hog;
    Mat mono_img;
    hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
    namedWindow("test",1);
    std::vector<Rect> found;
    std::vector<Rect> foundsave;
    std::vector<std::vector<Rect> > found_filtered(3,std::vector<Rect>(0));
    int k = 0;
    Mat frame;
    Mat rW;
    Rect save(Point_<int>(0,0), Point_<int>(0,0));
    Point2d prevPos;
    bool init = false; // becomes true once a previous ground position exists
    std::vector<Point2d> src(4);
    src[0] = Point2d(263,281); src[1] = Point2d(7,265); src[2] = Point2d(357,147); src[3] = Point2d(161,147);
    std::vector<Point2d> dest(4);
    dest[0] = Point2d(3,0); dest[1] = Point2d(0,0); dest[2] = Point2d(3,4.5); dest[3] = Point2d(0,3);
    Mat H;
    H = findHomography(src,dest);
    for(;;)
    {
        foundsave = found_filtered[k%3];
        found_filtered[k%3] = std::vector<Rect>(0);
        cap >> frame; // get a new frame from camera
        if(k==0)
        {
            rW = frame;
            warpPerspective(frame,rW,H,Size(1000,1000));
            imwrite("/home/benjamin.michelland/calibrated.png",rW);
        }
        cvtColor(frame, mono_img, CV_BGR2GRAY);
        hog.detectMultiScale(mono_img, found,-0.5);
        size_t i, j;
        for (i=0; i<found.size(); i++)
        {
            Rect r = found[i];
            for (j=0; j<found.size(); j++)
                if (j!=i && (r & found[j])==r)
                    break;
            if (j==found.size())
                found_filtered[k%3].push_back(r);
        }
        if(k>=3)
        {
            if(found_filtered[k%3].empty())
            {
                found_filtered[k%3] = foundsave;
            }
            if(!found_filtered[0].empty() && !found_filtered[1].empty() && !found_filtered[2].empty())
            {
                std::vector<int> list = threeNearest(save,found_filtered);
                for(unsigned i = 0; i < found_filtered[0].size(); i++) {
                    if(list[0] != -1 && list[1] != -1 && list[2] !=-1)
                    {
                        Rect r1 = found_filtered[0][list[0]];
                        Rect r2 = found_filtered[1][list[1]];
                        Rect r3 = found_filtered[2][list[2]];
                        Point_<int> c1 = r1.tl() + Point_<int>(r1.width/2,r1.height-10);
                        c1 = c1 + r2.tl() + Point_<int>(r2.width/2,r2.height-10);
                        c1 = c1 + r3.tl() + Point_<int>(r3.width/2,r3.height-10);
                        c1 = Point_<int>(c1.x/3.0,c1.y/3.0);
                        rectangle(frame, c1, c1, Scalar(0,255,0), 2);
                        save = Rect(c1,c1);
                    }
                }
                float speed = 0;
                if(k%25==0)
                {
                    std::vector<Point2d> pix(1);
                    pix[0] = save.tl();
                    std::vector<Point2d> pos(1);
                    perspectiveTransform(pix,pos,H);
                    if(init)
                    {
                        speed = sqrt((pos[0]-prevPos).dot(pos[0]-prevPos));
                        std::cout << std::setprecision(3) << speed << " m/s" << std::endl;
                    }
                    prevPos = pos[0];
                    init = true;
                }
            }
        }
        imshow("test", frame);
        if(waitKey(30) >= 0) break;
        k++;
    }
    // the camera will be deinitialized automatically in VideoCapture destructor
    return 0;
}
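Example #26 above estimates walking speed by mapping image points onto a metric ground plane: findHomography() fits H from four image points to their known ground coordinates (in metres), and perspectiveTransform() applies H to the tracked foot point; differencing ground positions roughly once per second (k%25 at an assumed ~25 fps) yields metres per second. A minimal sketch of the mapping step (the pixel value is illustrative):

std::vector<cv::Point2d> pix(1, cv::Point2d(263, 281)); // a pixel on the ground plane
std::vector<cv::Point2d> world(1);
cv::perspectiveTransform(pix, world, H);                // world[0] is now in metres
// speed = |world_t - world_prev| / dt for two such points taken dt seconds apart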
Example #27
int main(int argc, char ** argv)
{
	VideoCapture cap(0);

	//set the width and the height of frame captured from this camera
	//cap.set(CV_CAP_PROP_FRAME_WIDTH,320);
	//cap.set(CV_CAP_PROP_FRAME_HEIGHT,240);
	if(!cap.isOpened())
	{
		cout<<"Error: Failed to open camera"<<endl;
		return -1;
	}

	Mat img;
	HOGDescriptor hog;
	//use the default People Detector
	cout<<"Setting the default people detector"<<endl;
	hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
	cout<<"Setting done"<<endl;
	
	cout<<"Creatting a Window"<<endl;
	namedWindow("Video Capture",CV_WINDOW_AUTOSIZE);
	cout<<"Creation done"<<endl;

	while(true)
	{
		printf("Getting an image\n");
		for(unsigned char count = 0; count < 10;count++)
		{
			//printf("Grabbing Frame from file\n");
			if(!cap.grab())
			{
					printf("Error: Cannot grab frame from file\n");
					count--;
					continue;
			}
			//printf("Retrieving Frame into Mat\n");
			if(!cap.retrieve(img))
			{
					printf("Error: Cannot retrieve frame into Mat\n");
					count--;
					continue;
			}
		}
		printf("Image got\n");
		if(!img.data)
		{
			cout<<"Currently no valid data"<<endl;
			continue;
		}
		cout<<"Capture valid frame"<<endl;
		vector<Rect> found, found_filtered;
		/*
		 *
		 *void HOGDescriptor::detectMultiScale(
		 *						const Mat& img, vector<Rect>& foundLocations,
		 *						double hitThreshold, Size winStride, Size padding,
		 *						double scale0, double finalThreshold, bool useMeanshiftGrouping) const
		 *
		 */
		hog.detectMultiScale(img,found,0,Size(8,8),Size(32,32),1.05,2);

		size_t i,j;
		//first for loop
		cout<<"Entering first for loop"<<endl;
		for(i = 0; i < found.size();i++)
		{
			Rect r = found[i];
			for (j=0; j<found.size(); j++)
				if (j!=i && (r & found[j])==r)
					break;
			if (j==found.size())
				found_filtered.push_back(r);
		}
		cout<<"Entering second for loop"<<endl;
		//second for loop
		for(i = 0; i < found_filtered.size();i++)
		{
			Rect r = found_filtered[i];
			r.x += cvRound(r.width * 0.1);
			r.width = cvRound(r.width * 0.8);
			r.y += cvRound(r.height * 0.06);
			r.height = cvRound(r.height * 0.9);
			rectangle(img,r.tl(),r.br(),cv::Scalar(0,255,0),2);
		}
		imshow("Video Capture",img);
		if(waitKey(20) >= 0)
			break;
	}
	return 0;
}	
Example #28
int main (int arg, char * argv[])
{

    const char * val = "";

	if (arg > 1)
	{
		val = argv[1];
	}
	else
	{
		//char szFile[100];
		//OPENFILENAME ofn;
		//ZeroMemory( &ofn , sizeof( ofn));
		//ofn.lStructSize = sizeof ( ofn );
		//ofn.hwndOwner = NULL  ;
		//wchar_t wFile[100];
		//mbstowcs(wFile, szFile, strlen(szFile) + 1);
		//ofn.lpstrFile = wFile;
		//ofn.lpstrFile[0] = L'\0';
		//ofn.nMaxFile = sizeof( szFile );
		//ofn.lpstrFilter = L"All\0*.*\0Text\0*.TXT\0";
		//ofn.nFilterIndex =1;
		//ofn.lpstrFileTitle = NULL ;
		//ofn.nMaxFileTitle = 0 ;
		//ofn.lpstrInitialDir=NULL ;
		//ofn.Flags = OFN_PATHMUSTEXIST|OFN_FILEMUSTEXIST ;
		//GetOpenFileName( &ofn );

		//val = new char[wcslen(ofn.lpstrFile) + 1];
		//wsprintfA(val, "%S", ofn.lpstrFile);
	}

	try
	{
		IplImage *image = cvLoadImage(val); //"f:\\test\\11.jpg"
		Mat img(image);
		HOGDescriptor hog;
		hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
 
 
		vector<Rect> found, found_filtered;
		hog.detectMultiScale(img, found, 0, Size(8,8), Size(32,32), 1.05, 2);
		//hog.detectMultiScale(img, found, 0, Size(6,6), Size(32,32), 1.05, 0, true);
 
		size_t i, j;
		for (i=0; i<found.size(); i++)
		{
			Rect r = found[i];
			for (j=0; j<found.size(); j++)
				if (j!=i && (r & found[j])==r)
					break;
			if (j==found.size())
				found_filtered.push_back(r);
		}
		for (i=0; i<found_filtered.size(); i++)
		{
			Rect r = found_filtered[i];

			r.x += cvRound(r.width*0.1);
			r.width = cvRound(r.width*0.8);
			//r.y += cvRound(r.height*0.06);
			r.height = cvRound(r.height*0.9);
			rectangle(img, r.tl(), r.br(), cv::Scalar(0,0,255), 1);
		}

		namedWindow("Image Viewer", CV_WINDOW_AUTOSIZE);
		imshow("Image Viewer", img);
		waitKey(0);
	}
	catch (const exception& e)
	{

	}

    return 0;
}
Example #29
File: main.cpp Project: mlilien/trainHOG
/**
 * Main program entry point
 * @param argc unused
 * @param argv unused
 * @return EXIT_SUCCESS (0) or EXIT_FAILURE (1)
 */
int main(int argc, char** argv) {

    // <editor-fold defaultstate="collapsed" desc="Init">
    HOGDescriptor hog; // Use standard parameters here
    hog.winSize = Size(64, 128); // Default training images size as used in paper
    // Get the files to train from somewhere
    static vector<string> positiveTrainingImages;
    static vector<string> negativeTrainingImages;
    static vector<string> validExtensions;
    validExtensions.push_back("jpg");
    validExtensions.push_back("png");
    validExtensions.push_back("ppm");
    // </editor-fold>

    // <editor-fold defaultstate="collapsed" desc="Read image files">
    getFilesInDirectory(posSamplesDir, positiveTrainingImages, validExtensions);
    getFilesInDirectory(negSamplesDir, negativeTrainingImages, validExtensions);
    /// Retrieve the descriptor vectors from the samples
    unsigned long overallSamples = positiveTrainingImages.size() + negativeTrainingImages.size();
    // </editor-fold>
    
    // <editor-fold defaultstate="collapsed" desc="Calculate HOG features and save to file">
    // Make sure there are actually samples to train
    if (overallSamples == 0) {
        printf("No training sample files found, nothing to do!\n");
        return EXIT_SUCCESS;
    }

    /// @WARNING: This is really important: some libraries (e.g. ROS) seem to set the system locale, which uses decimal commas instead of points and causes the file input parsing to fail
    setlocale(LC_ALL, "C"); // Do not use the system locale
    setlocale(LC_NUMERIC,"C");
    setlocale(LC_ALL, "POSIX");

    printf("Reading files, generating HOG features and save them to file '%s':\n", featuresFile.c_str());
    float percent;
    /**
     * Save the calculated descriptor vectors to a file in a format that can be used by SVMlight for training
     * @NOTE: If you split this into separate steps:
     * 1. calculating features into memory (e.g. into a cv::Mat or vector< vector<float> >), 
     * 2. saving features to file / directly inject from memory to machine learning algorithm,
     * the program may consume a considerable amount of main memory
     */ 
    fstream File;
    File.open(featuresFile.c_str(), ios::out);
    if (File.good() && File.is_open()) {
        #if TRAINHOG_USEDSVM == SVMLIGHT
            // Remove following line for libsvm which does not support comments
            File << "# Use this file to train, e.g. SVMlight by issuing $ svm_learn -i 1 -a weights.txt " << featuresFile.c_str() << endl;
        #endif
        // Iterate over sample images
        for (unsigned long currentFile = 0; currentFile < overallSamples; ++currentFile) {
            storeCursor();
            vector<float> featureVector;
            // Get positive or negative sample image file path
            const string currentImageFile = (currentFile < positiveTrainingImages.size() ? positiveTrainingImages.at(currentFile) : negativeTrainingImages.at(currentFile - positiveTrainingImages.size()));
            // Output progress
            if ( (currentFile+1) % 10 == 0 || (currentFile+1) == overallSamples ) {
                percent = ((currentFile+1) * 100 / overallSamples);
                printf("%5lu (%3.0f%%):\tFile '%s'", (currentFile+1), percent, currentImageFile.c_str());
                fflush(stdout);
                resetCursor();
            }
            // Calculate feature vector from current image file
            calculateFeaturesFromInput(currentImageFile, featureVector, hog);
            if (!featureVector.empty()) {
                /* Put positive or negative sample class to file, 
                 * true=positive, false=negative, 
                 * and convert positive class to +1 and negative class to -1 for SVMlight
                 */
                File << ((currentFile < positiveTrainingImages.size()) ? "+1" : "-1");
                // Save feature vector components
                for (unsigned int feature = 0; feature < featureVector.size(); ++feature) {
                    File << " " << (feature + 1) << ":" << featureVector.at(feature);
                }
                File << endl;
            }
        }
        printf("\n");
        File.flush();
        File.close();
    } else {
        printf("Error opening file '%s'!\n", featuresFile.c_str());
        return EXIT_FAILURE;
    }
    // </editor-fold>

    // <editor-fold defaultstate="collapsed" desc="Pass features to machine learning algorithm">
    /// Read in and train the calculated feature vectors
    printf("Calling %s\n", TRAINHOG_SVM_TO_TRAIN::getInstance()->getSVMName());
    TRAINHOG_SVM_TO_TRAIN::getInstance()->read_problem(const_cast<char*> (featuresFile.c_str()));
    TRAINHOG_SVM_TO_TRAIN::getInstance()->train(); // Call the core libsvm training procedure
    printf("Training done, saving model file!\n");
    TRAINHOG_SVM_TO_TRAIN::getInstance()->saveModelToFile(svmModelFile);
    // </editor-fold>

    // <editor-fold defaultstate="collapsed" desc="Generate single detecting feature vector from calculated SVM support vectors and SVM model">
    printf("Generating representative single HOG feature vector using svmlight!\n");
    vector<float> descriptorVector;
    vector<unsigned int> descriptorVectorIndices;
    // Generate a single detecting feature vector (v1 | b) from the trained support vectors, for use e.g. with the HOG algorithm
    TRAINHOG_SVM_TO_TRAIN::getInstance()->getSingleDetectingVector(descriptorVector, descriptorVectorIndices);
    // And save the precious to file system
    saveDescriptorVectorToFile(descriptorVector, descriptorVectorIndices, descriptorVectorFile);
    // </editor-fold>

    // <editor-fold defaultstate="collapsed" desc="Test detecting vector">
    // Detector detection tolerance threshold
    const double hitThreshold = TRAINHOG_SVM_TO_TRAIN::getInstance()->getThreshold();
	printf("Detector hitThreshold is: %f\r\n", hitThreshold);
	
    // Set our custom detecting vector
    hog.setSVMDetector(descriptorVector);
    hog.save(cvHOGFile);
	// save hitThreshold to HOG yaml file
	File.open(cvHOGFile.c_str(), std::ios_base::app);
    if (File.good() && File.is_open()) {
        File << "hitThreshold: " << hitThreshold << std::endl;
        File.flush();
        File.close();
	}		
    else {
        printf("Error opening file '%s'!\n", featuresFile.c_str());
        return EXIT_FAILURE;
    }
 	
    printf("Testing training phase using training set as test set (just to check if training is ok - no detection quality conclusion with this!)\n");
    detectTrainingSetTest(hog, hitThreshold, positiveTrainingImages, negativeTrainingImages);

    /**    
    printf("Testing custom detection using camera\n");
    VideoCapture cap(0); // open the default camera
    if(!cap.isOpened()) { // check if we succeeded
        printf("Error opening camera!\n");
        return EXIT_FAILURE;
    }
    Mat testImage;
    while ((cvWaitKey(10) & 255) != 27) {
        cap >> testImage; // get a new frame from camera
//        cvtColor(testImage, testImage, CV_BGR2GRAY); // If you want to work on grayscale images
        detectTest(hog, hitThreshold, testImage);
        imshow("HOG custom detection", testImage);
    }
    */
    // </editor-fold>

    return EXIT_SUCCESS;
}
Example #30
void hogSVMTrain(HOGDescriptor &myHOG, bool TRAIN, bool HORZ)
{
  // detection window (64,128), block size (16,16), block stride (8,8), cell size (8,8), 9 histogram bins
  //HOGDescriptor hog_vertical(Size(15,30),Size(5,10),Size(5,5),Size(5,5),9,1,-1.0,0,0.2,true,32); // HOG detector used to compute HOG descriptors
  //HOGDescriptor hog_horz(Size(30,15),Size(10,5),Size(5,5),Size(5,5),9,1,-1.0,0,0.2,true,32);
  HOGDescriptor hog_vertical(Win_vertical,block_vertical,blockStride_vertical,cell_vertical,9,1,-1.0,0,0.2,true,64); // HOG detector used to compute HOG descriptors
  HOGDescriptor hog_horz(Size(36,12),Size(12,6),Size(6,6),Size(6,6),9,1,-1.0,0,0.2,true,64);

  int DescriptorDim; // HOG descriptor dimension, determined by image size, detection window size, block size, and histogram bins per cell
  MySVM svm; // SVM classifier
  int WinStride;
  // if TRAIN is true, retrain the classifier; otherwise load the saved model



  if(!HORZ)
  {
    if(TRAIN)
    {
      string ImgName; // image file name (absolute path)
      ifstream finPos("D:\\JY\\JY_TrainingSamples\\positive\\positivePath.txt"); // list of positive sample image files
      //ifstream finPos("D:\\JY\\JY_TrainingSamples\\Traffic Light Benchmark\\Positive\\pos.txt"); // list of positive sample image files
      //ifstream finNeg("D:\\Traffic Light Detection\\JY_TrainingSamples\\negetive1\\negetive1.txt"); // list of negative sample image files
      ifstream finNeg("D:\\JY\\JY_TrainingSamples\\negetive1\\negetive1.txt"); // list of negative sample image files

      Mat sampleFeatureMat; // feature vectors of all training samples; rows = sample count, cols = HOG descriptor dimension
      Mat sampleLabelMat; // class labels of the training samples, one column; 1 = person, -1 = no person

      // read the positive sample images one by one and compute their HOG descriptors
      for(int num=0; num<PosSamNO && getline(finPos,ImgName); num++)
      {
        cout<<"Processing: "<<ImgName<<endl;
        Mat src = imread(ImgName); // read the image

        //resize(src,src,Size(15,30));
        resize(src,src,Win_vertical);
        vector<float> descriptors; // HOG descriptor vector
        hog_vertical.compute(src,descriptors,Size(8,8)); // compute the HOG descriptor with window stride (8,8)
        cout<<"Descriptor dimension: "<<descriptors.size()<<endl;

        // initialize the feature and label matrices on the first sample,
        // since the descriptor dimension is only known after computing one descriptor
        if( 0 == num )
        {
          DescriptorDim = descriptors.size(); // HOG descriptor dimension
          // initialize the feature matrix: rows = sample count, cols = HOG descriptor dimension
          sampleFeatureMat = Mat::zeros(PosSamNO+NegSamNO+HardExampleNO, DescriptorDim, CV_32FC1);
          sampleLabelMat = Mat::zeros(PosSamNO+NegSamNO+HardExampleNO, 1, CV_32FC1);
          //sampleLabelMat = Mat::zeros(PosSamNO+5742*NegSamNO+HardExampleNO, 1, CV_32FC1);
        }

        // copy the computed HOG descriptor into sampleFeatureMat
        for(int i=0; i<DescriptorDim; i++)
          sampleFeatureMat.at<float>(num,i) = descriptors[i]; // i-th element of the num-th sample's feature vector
        sampleLabelMat.at<float>(num,0) = 1; // positive samples are labeled 1 (person)
      }
	
      // read the negative sample images one by one and compute their HOG descriptors
      for(int num=0; num<NegSamNO && getline(finNeg,ImgName); num++)
      {
        cout<<"Processing: "<<ImgName<<endl;
        Mat src = imread(ImgName); // read the image

        //resize(src,src,Size(15,30));
        resize(src,src,Win_vertical);
        //cvtColor(src,gray,CV_RGB2GRAY);
        vector<float> descriptors; // HOG descriptor vector
        hog_vertical.compute(src,descriptors,Size(8,8)); // compute the HOG descriptor with window stride (8,8)
        cout<<"Descriptor dimension: "<<descriptors.size()<<endl;

        // copy the computed HOG descriptor into sampleFeatureMat
/*      WinStride=descriptors.size()/DescriptorDim;
        for(int j=0;j<WinStride;j++)
        {
          for(int i=0; i<DescriptorDim; i++)
            sampleFeatureMat.at<float>(j+num*WinStride+PosSamNO,i) = descriptors[i+DescriptorDim*j]; // i-th element of the (PosSamNO+num)-th sample's feature vector
          sampleLabelMat.at<float>(j+num*WinStride+PosSamNO,0) = -1; // negative samples are labeled -1 (no person)
        }*/

        for(int i=0; i<DescriptorDim; i++)
          sampleFeatureMat.at<float>(num+PosSamNO,i) = descriptors[i]; // i-th element of the (PosSamNO+num)-th sample's feature vector
        sampleLabelMat.at<float>(num+PosSamNO,0) = -1; // negative samples are labeled -1 (no person)
      }

      // process the HardExample negatives
      if(HardExampleNO > 0)
      {
        ifstream finHardExample("D:\\JY\\JY_TrainingSamples\\hardexample\\hardexample.txt"); // list of HardExample negative files
        // read the HardExample negative images one by one and compute their HOG descriptors
        for(int num=0; num<HardExampleNO && getline(finHardExample,ImgName); num++)
        {
          cout<<"Processing: "<<ImgName<<endl;
          // ImgName = ImgName; // prepend the HardExample path here if needed
          Mat src = imread(ImgName); // read the image

          //resize(src,src,Size(15,30));
          resize(src,src,Win_vertical);
          //cvtColor(src,gray,CV_RGB2GRAY);
          vector<float> descriptors; // HOG descriptor vector
          hog_vertical.compute(src,descriptors,Size(8,8)); // compute the HOG descriptor with window stride (8,8)
          //hog_vertical.compute(src,descriptors,Size(5,5));
          //cout<<"Descriptor dimension: "<<descriptors.size()<<endl;

          // copy the computed HOG descriptor into sampleFeatureMat
          for(int i=0; i<DescriptorDim; i++)
            sampleFeatureMat.at<float>(num+PosSamNO+NegSamNO,i) = descriptors[i]; // i-th element of the sample's feature vector
          sampleLabelMat.at<float>(num+PosSamNO+NegSamNO,0) = -1; // negative samples are labeled -1 (no person)
        }
      }

    ////输出样本的HOG特征向量矩阵到文件
    //ofstream fout("SampleFeatureMat.txt");
    //for(int i=0; i<PosSamNO+NegSamNO; i++)
    //{
    //	fout<<i<<endl;
    //	for(int j=0; j<DescriptorDim; j++)
    //		fout<<sampleFeatureMat.at<float>(i,j)<<"  ";
    //	fout<<endl;
    //}

    //训练SVM分类器
    //迭代终止条件,当迭代满1000次或误差小于FLT_EPSILON时停止迭代
    CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON);
    //SVM参数:SVM类型为C_SVC;线性核函数;松弛因子C=0.01
    CvSVMParams param(CvSVM::C_SVC, CvSVM::LINEAR, 0, 1, 0, 0.01, 0, 0, 0, criteria);
    cout<<"开始训练SVM分类器"<<endl;
    svm.train(sampleFeatureMat, sampleLabelMat, Mat(), Mat(), param);//训练分类器
    cout<<"训练完成"<<endl;
    svm.save("src\\SVM_HOG_Benchmark.xml");//将训练好的SVM模型保存为xml文件
	//svm.save("SVM_HOG_10_20.xml");
  }
  else //若TRAIN为false,从XML文件读取训练好的分类器
  {
    svm.load("src\\SVM_HOG_Benchmark.xml");//从XML文件读取训练好的SVM模型
	// svm.load("SVM_HOG_10_20.xml");
  }


  /*************************************************************************************************
  线性SVM训练完成后得到的XML文件里面,有一个数组,叫做support vector,还有一个数组,叫做alpha,有一个浮点数,叫做rho;
  将alpha矩阵同support vector相乘,注意,alpha*supportVector,将得到一个列向量。之后,再该列向量的最后添加一个元素rho。
  如此,变得到了一个分类器,利用该分类器,直接替换opencv中行人检测默认的那个分类器(cv::HOGDescriptor::setSVMDetector()),
  就可以利用你的训练样本训练出来的分类器进行行人检测了。
  ***************************************************************************************************/
  DescriptorDim = svm.get_var_count();//特征向量的维数,即HOG描述子的维数
  int supportVectorNum = svm.get_support_vector_count();//支持向量的个数
 // cout<<"支持向量个数:"<<supportVectorNum<<endl;

  Mat alphaMat = Mat::zeros(1, supportVectorNum, CV_32FC1);//alpha向量,长度等于支持向量个数
  Mat supportVectorMat = Mat::zeros(supportVectorNum, DescriptorDim, CV_32FC1);//支持向量矩阵
  Mat resultMat = Mat::zeros(1, DescriptorDim, CV_32FC1);//alpha向量乘以支持向量矩阵的结果

  //将支持向量的数据复制到supportVectorMat矩阵中
  for(int i=0; i<supportVectorNum; i++)
  {
    const float * pSVData = svm.get_support_vector(i);//返回第i个支持向量的数据指针
    for(int j=0; j<DescriptorDim; j++)
    {
      //cout<<pData[j]<<" ";
      supportVectorMat.at<float>(i,j) = pSVData[j];
    }
  }

  //将alpha向量的数据复制到alphaMat中
  double * pAlphaData = svm.get_alpha_vector();//返回SVM的决策函数中的alpha向量
  for(int i=0; i<supportVectorNum; i++)
  {
    alphaMat.at<float>(0,i) = pAlphaData[i];
  }

  //计算-(alphaMat * supportVectorMat),结果放到resultMat中
  //gemm(alphaMat, supportVectorMat, -1, 0, 1, resultMat);//不知道为什么加负号?
  resultMat = -1 * alphaMat * supportVectorMat;

  // Build the detector usable as the setSVMDetector(const vector<float>& detector) argument
  vector<float> myDetector;
  // Copy the data from resultMat into myDetector
  for(int i = 0; i < DescriptorDim; i++)
  {
    myDetector.push_back(resultMat.at<float>(0,i));
  }
  // Finally append the bias rho to complete the detector
  myDetector.push_back(svm.get_rho());
  //cout << "detector dimension: " << myDetector.size() << endl;
  // Set the detector on the HOGDescriptor.
  //HOGDescriptor myHOG_vertical(Size(15,30), Size(5,10), Size(5,5), Size(5,5), 9, 1, -1.0, 0, 0.2, true, 34); // without these explicit parameters OpenCV falls back to its default HOG settings, which conflicts with the sizes set earlier -- this cost a whole afternoon of debugging!
  myHOG.setSVMDetector(myDetector); // pass the detector into HOGDescriptor::setSVMDetector
  //myHOG_vertical.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());

  // Save the detector coefficients to a file, one value per line
  ofstream fout("src\\HOGDetectorForOpenCV_BenchMark.txt");
  for(size_t i = 0; i < myDetector.size(); i++)
  {
    fout << myDetector[i] << endl;
  }
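  // A minimal sketch of loading this detector file back at startup instead of
  // retraining (simply reverses the one-value-per-line format written above):
  //
  //   vector<float> loadedDetector;
  //   ifstream fin("src\\HOGDetectorForOpenCV_BenchMark.txt");
  //   float val;
  //   while (fin >> val)
  //       loadedDetector.push_back(val);
  //   myHOG.setSVMDetector(loadedDetector);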
  }





else
{
  if(TRAIN)
  {
    string horz_ImgName; // image file name (absolute path)
    ifstream horz_finPos("D:\\Traffic Light Detection\\JY_TrainingSamples\\horz_positive\\horz_Pos.txt"); // list of positive sample images
    ifstream horz_finNeg("D:\\Traffic Light Detection\\JY_TrainingSamples\\horz_Neg\\horz_Neg.txt"); // list of negative sample images
    //ifstream finNeg("C:\\Users\\JY\\Desktop\\test\\test.txt");

    Mat Horz_sampleFeatureMat; // feature matrix of all training samples: one row per sample, one column per HOG descriptor dimension
    Mat Horz_sampleLabelMat;   // label vector: one row per sample, single column; 1 = positive, -1 = negative

    // Read each positive sample image and compute its HOG descriptor
    for(int num = 0; num < HORZ_PosSamNO && getline(horz_finPos, horz_ImgName); num++)
    {
      cout << "Processing: " << horz_ImgName << endl;
      Mat horz_src = imread(horz_ImgName); // read the image

      resize(horz_src, horz_src, Size(30,15));
      //resize(horz_src, horz_src, Size(50,20));

      vector<float> horz_descriptors; // HOG descriptor vector
      hog_horz.compute(horz_src, horz_descriptors, Size(8,8)); // compute the HOG descriptor with a window stride of (8,8)
      cout << "descriptor dimension: " << horz_descriptors.size() << endl;

      // When processing the first sample, initialize the feature and label matrices:
      // the descriptor dimension is only known after the first compute()
      if( 0 == num )
      {
        DescriptorDim = horz_descriptors.size(); // HOG descriptor dimension
        // Feature matrix of all training samples: one row per sample, one column per descriptor dimension
        Horz_sampleFeatureMat = Mat::zeros(HORZ_PosSamNO+HORZ_NegSamNO+HORZ_HardExampleNO, DescriptorDim, CV_32FC1);
        // Label vector: one row per sample, single column; 1 = positive, -1 = negative
        Horz_sampleLabelMat = Mat::zeros(HORZ_PosSamNO+HORZ_NegSamNO+HORZ_HardExampleNO, 1, CV_32FC1);
      }

      // Copy the computed HOG descriptor into row num of Horz_sampleFeatureMat
      for(int i = 0; i < DescriptorDim; i++)
        Horz_sampleFeatureMat.at<float>(num, i) = horz_descriptors[i];
      Horz_sampleLabelMat.at<float>(num, 0) = 1; // positive samples are labeled 1
    }
	
    // Read each negative sample image and compute its HOG descriptor
    for(int num = 0; num < HORZ_NegSamNO && getline(horz_finNeg, horz_ImgName); num++)
    {
      cout << "Processing: " << horz_ImgName << endl;
      Mat horz_src = imread(horz_ImgName); // read the image

      resize(horz_src, horz_src, Size(30,15));
      //resize(horz_src, horz_src, Size(50,20));

      vector<float> horz_descriptors; // HOG descriptor vector
      hog_horz.compute(horz_src, horz_descriptors, Size(8,8)); // compute the HOG descriptor with a window stride of (8,8)
      cout << "descriptor dimension: " << horz_descriptors.size() << endl;

      // Copy the computed HOG descriptor into row HORZ_PosSamNO+num of Horz_sampleFeatureMat
      for(int i = 0; i < DescriptorDim; i++)
        Horz_sampleFeatureMat.at<float>(num+HORZ_PosSamNO, i) = horz_descriptors[i];
      Horz_sampleLabelMat.at<float>(num+HORZ_PosSamNO, 0) = -1; // negative samples are labeled -1
    }

    // Process hard-example negatives
    if(HORZ_HardExampleNO > 0)
    {
      ifstream horz_finHardExample("D:\\Traffic Light Detection\\JY_TrainingSamples\\horz_HardExample\\hardexample.txt"); // list of hard-example negative images
      // Read each hard-example image and compute its HOG descriptor
      for(int num = 0; num < HORZ_HardExampleNO && getline(horz_finHardExample, horz_ImgName); num++)
      {
        cout << "Processing: " << horz_ImgName << endl;
        Mat horz_src = imread(horz_ImgName); // read the image

        resize(horz_src, horz_src, Size(30,15));

        vector<float> horz_descriptors; // HOG descriptor vector
        hog_horz.compute(horz_src, horz_descriptors, Size(8,8)); // compute the HOG descriptor with a window stride of (8,8)

        // Copy the computed HOG descriptor into row HORZ_PosSamNO+HORZ_NegSamNO+num of Horz_sampleFeatureMat
        for(int i = 0; i < DescriptorDim; i++)
          Horz_sampleFeatureMat.at<float>(num+HORZ_PosSamNO+HORZ_NegSamNO, i) = horz_descriptors[i];
        Horz_sampleLabelMat.at<float>(num+HORZ_PosSamNO+HORZ_NegSamNO, 0) = -1; // hard examples are labeled -1 (negative)
      }
    }

    //// Optionally dump the HOG feature matrix of all samples to a file
    //ofstream fout("SampleFeatureMat.txt");
    //for(int i=0; i<HORZ_PosSamNO+HORZ_NegSamNO; i++)
    //{
    //    fout << i << endl;
    //    for(int j=0; j<DescriptorDim; j++)
    //        fout << Horz_sampleFeatureMat.at<float>(i,j) << "  ";
    //    fout << endl;
    //}

    // Train the SVM classifier
    // Termination criteria: stop after 1000 iterations or when the error falls below FLT_EPSILON
    CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 1000, FLT_EPSILON);
    // SVM parameters: type C_SVC, linear kernel, slack penalty C = 0.01
    CvSVMParams param(CvSVM::C_SVC, CvSVM::LINEAR, 0, 1, 0, 0.01, 0, 0, 0, criteria);
    cout << "Training SVM classifier..." << endl;
    svm.train(Horz_sampleFeatureMat, Horz_sampleLabelMat, Mat(), Mat(), param); // train the classifier
    cout << "Training finished" << endl;
    svm.save("src\\SVM_HOG_Horz.xml"); // save the trained SVM model as an XML file
  }
  else // if TRAIN is false, load the trained classifier from the XML file
  {
    svm.load("src\\SVM_HOG_Horz.xml"); // load the trained SVM model from XML
  }


  /*************************************************************************************************
  Same construction as for the vertical detector above: the XML file from linear SVM training
  contains the support vectors, the alpha array, and the scalar rho; multiply alpha by the
  support vector matrix, negate, append rho, and pass the result to
  cv::HOGDescriptor::setSVMDetector().
  ***************************************************************************************************/
  DescriptorDim = svm.get_var_count(); // feature vector dimension, i.e. the HOG descriptor dimension
  int supportVectorNum = svm.get_support_vector_count(); // number of support vectors
  cout << "number of support vectors: " << supportVectorNum << endl;

  Mat alphaMat = Mat::zeros(1, supportVectorNum, CV_32FC1); // alpha vector, length = number of support vectors
  Mat supportVectorMat = Mat::zeros(supportVectorNum, DescriptorDim, CV_32FC1); // support vector matrix
  Mat resultMat = Mat::zeros(1, DescriptorDim, CV_32FC1); // product of the alpha vector and the support vector matrix

  // Copy the support vector data into supportVectorMat
  for(int i = 0; i < supportVectorNum; i++)
  {
    const float * pSVData = svm.get_support_vector(i); // pointer to the data of the i-th support vector
    for(int j = 0; j < DescriptorDim; j++)
    {
      supportVectorMat.at<float>(i,j) = pSVData[j];
    }
  }

  // Copy the alpha vector into alphaMat
  double * pAlphaData = svm.get_alpha_vector(); // alpha vector of the SVM decision function
  for(int i = 0; i < supportVectorNum; i++)
  {
    alphaMat.at<float>(0,i) = pAlphaData[i];
  }

  // Compute -(alphaMat * supportVectorMat); the minus sign converts sign conventions,
  // as explained in the vertical branch above
  //gemm(alphaMat, supportVectorMat, -1, 0, 1, resultMat);
  resultMat = -1 * alphaMat * supportVectorMat;

  // Build the detector usable as the setSVMDetector(const vector<float>& detector) argument
  vector<float> myDetector;
  // Copy the data from resultMat into myDetector
  for(int i = 0; i < DescriptorDim; i++)
  {
    myDetector.push_back(resultMat.at<float>(0,i));
  }
  // Finally append the bias rho to complete the detector
  myDetector.push_back(svm.get_rho());
  cout << "detector dimension: " << myDetector.size() << endl;
  // Set the detector on the HOGDescriptor (explicit construction parameters must match
  // the sizes used during training, as noted in the vertical branch above)
  myHOG.setSVMDetector(myDetector);
  //myHOG_vertical.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());

  // Save the detector coefficients to a file, one value per line
  ofstream fout("src\\HOGDetectorForOpenCV_horz.txt");
  for(size_t i = 0; i < myDetector.size(); i++)
  {
    fout << myDetector[i] << endl;
  }
}
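  // A minimal usage sketch once either branch has set a detector on myHOG
  // (the test image path is hypothetical):
  //
  //   Mat testImg = imread("D:\\Traffic Light Detection\\test.png");
  //   vector<Rect> found;
  //   myHOG.detectMultiScale(testImg, found, 0, Size(8,8), Size(0,0), 1.05, 2);
  //   for (size_t k = 0; k < found.size(); k++)
  //       rectangle(testImg, found[k], Scalar(0, 255, 0), 2);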

  

}