Example #1
int main( int argc, char **argv )
{
    const char* facebaseopt="--facebase=";
    char* classifierbase = 0;
    char* aviname = 0;
    int auto_run = 0;
    
    if( argc > 1 && argv[argc-1][0] != '-' )
    {
        aviname = argv[argc-1];
        auto_run = 1;
        argc--;
    }
    
    if( argc > 1 && strncmp(argv[argc-1],facebaseopt,strlen(facebaseopt))==0 )
    {
        classifierbase=argv[argc-1] + strlen(facebaseopt);
        argc--;
    }
    
    if( !InitFaceDetect(classifierbase))
    {
        fprintf( stderr, "Could not locate face classifier base at %s\n"
                         "Use --facebase=<classifier base path> option to specify the base\n",
                 classifierbase );
        return -1;
    }
	
    cpu_freq = cvGetTickFrequency(); 
    printf("Tick frequency (*10^-6): %g\n", cpu_freq );
 
    Fl_Window* w;
    {
        Fl_Window* o = root_window = new Fl_Window( root_w, root_h );
        w = o;
        {
            Fl_Tabs* o = new Fl_Tabs( 10, 10, root_w - 20, root_h - 100 );
            // camera tab
            {
                Fl_Group* o = new Fl_Group( 10, 30, root_w - 20, root_h - 110, "Face Detection" );
                {
                    VideoWindow* o = new VideoWindow( 15, 35, root_w - 30, root_h - 120 );
                    video_window = o;
                    o->box( FL_BORDER_BOX );
                    o->color(0);
                }
                o->end();
            }
            o->end();
            Fl_Group::current()->resizable(o);
        }
        {
            const int bwidth = 30, bheight = 30;
            play_button = new Fl_Button( 10, root_h - 35, bwidth, bheight, "@>" );
            play_button->callback((Fl_Callback*)cb_PauseResume);
            play_button->deactivate();
            stop_button = new Fl_Button( 10 + bwidth, root_h - 35, bwidth, bheight, "@square" );
            stop_button->callback((Fl_Callback*)cb_Stop);
            stop_button->deactivate();
            video_button = new Fl_Button( 10 + bwidth*2, root_h - 35, bwidth, bheight, "..." );
            video_button->callback((Fl_Callback*)cb_Open);
            cam_button = new Fl_Button( 10 + bwidth*3, root_h - 35, bwidth, bheight, "[o]" );
            cam_button->callback((Fl_Callback*)cb_StartCam);
            video_pos = new Fl_Value_Slider( 10 + bwidth*4 + 10, root_h - 35, 200, 20, "Position" );
            video_pos->type( FL_HOR_NICE_SLIDER );
            record_button = new Fl_Button( 10 + bwidth*4 + 230, root_h - 35, bwidth, bheight, "@circle" );
            record_button->labelcolor(FL_RED);
            record_button->callback((Fl_Callback*)cb_StartStopRecord );
            record_button->deactivate();
            fps_box = new Fl_Box( 10, root_h - 75, bwidth*4, bheight, "<No data>" );
            fps_box->box( FL_DOWN_BOX );
        }
        o->end();
    }
    Fl::visual(FL_RGB);
    w->show(argc, argv);
    if( auto_run )
        Fl::add_timeout( 0.1, cb_AutoRun, aviname );
    Fl::run();
    cb_Exit(0,0);
    return 0;
}
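
The legacy C-API cvGetTickFrequency() used above returns ticks per microsecond, unlike the C++ cv::getTickFrequency(), which returns ticks per second; the two are not interchangeable. A minimal conversion sketch under the C-API convention (the helper name is illustrative, not part of the example):

// Convert a tick interval from the legacy C API into milliseconds.
// cvGetTickFrequency() yields ticks per microsecond, so dividing the
// difference by (frequency * 1000) produces milliseconds.
double ticks_to_ms( int64 t0, int64 t1 )
{
    return (double)(t1 - t0) / (cvGetTickFrequency() * 1000.0);
}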
Example #2
double toc()
{
	return ((double)cvGetTickCount() - g_tic) / ((double)cvGetTickFrequency()*1e6);
}
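// toc() above presumably pairs with a tic() that records the starting tick in
// the global g_tic it references. A minimal sketch of that counterpart; this
// definition is an assumption, not shown in the original source:
double g_tic = 0;
void tic()
{
	g_tic = (double)cvGetTickCount();
}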
void MainWindow::updateImage()
{
    //cap>>frame;
    //time1.stop();
    if (receiveFlag == 0)
    {
        tempImg = cvQueryFrame(capture); // the C++ interface cannot open this video, so use the C interface instead; the returned IplImage then has to be converted to a Mat
        if (tempImg)
            rawFrame = Mat(tempImg);
        else
        {
            time1.stop();
            return;
        }
    }

    if (!rawFrame.empty())
       cv::resize(rawFrame,frame,Size(320,240));
    else
        qDebug()<<"no images!";

   // equalizeHist(frame,frame2);
    if(!frame.empty())
    {
        //emit startSend(frame);

        double t=(double)cvGetTickCount();
        switch (algorithmFlag)
        {
            case 0:
                divog.trackObject(frame,sailencyMap,motionMap);
                findFlag = divog.getFindAns();
                break;
            case 1:
                sr.getSRsailencyMap(frame,sailencyMap,motionMap);
                findFlag = sr.getFindAns();
                break;
            case 2:
                myft.getFTmap(frame,sailencyMap,motionMap,learnRate);
                findFlag = myft.getFindAns();
                break;
            case 3:
                itti.getSailencyMapItti(frame,sailencyMap,motionMap);
                findFlag = itti.getFindAns();
                break;
            case 4:
                lc.getLCsailencyMap(frame,sailencyMap,motionMap);
                findFlag = lc.getFindAns();
                break;
            case 5:
                gmm.getGMMMap(frame,motionMap);
                findFlag = gmm.getFindAns();
                break; // without this break, case 5 fell through and case 6 overwrote its result
            case 6:
                gmm.getGrayGMMMap(frame,motionMap);
                findFlag = gmm.getFindAns();
                break;
        }
        if (sendFlag == 1)
            emit startSend(frame);
        if (findFlag == 1)
            emit detectSuccess();
        else
            emit detectFailed();
        t=((double)cvGetTickCount() - t)/(cvGetTickFrequency()*1000);
        fpsNum = 1000/t;
        //qDebug()<<fpsNum;

        cvtColor(frame, frame, CV_BGR2RGB);
        //getSailencyMap(frame,sailencyMap);
        //qDebug()<<sailencyMap.elemSize();
        //qDebug()<<frame.elemSize();
        //floatMap.convertTo(sailencyMap,CV_8U);
        //cvtColor(sailencyMap, sailencyMap, CV_BGR2RGB);
        this->update();
    }
    //time1.start();

}
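
One caveat in the conversion above: Mat(tempImg) wraps the frame buffer owned by the capture without copying, so rawFrame is only valid until the next cvQueryFrame() call. A sketch of a copying conversion using cv::cvarrToMat (OpenCV 2.x), shown here as a suggested alternative rather than the original author's code:

// cvarrToMat with copyData=true gives rawFrame its own buffer, so the
// pixels remain valid after the capture overwrites its internal frame.
IplImage* tempImg = cvQueryFrame(capture);
if (tempImg)
    rawFrame = cv::cvarrToMat(tempImg, true);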
Example #4
File: main.cpp Project: Barbakas/windage
int main()
{
	char message[100];
	cvNamedWindow("template");
	cvNamedWindow("sampling");
	cvNamedWindow("result");

	// initialize
	sprintf_s(message, IMAGE_SEQ_FILE_NAME, 0);
	IplImage* saveImage = cvLoadImage(message, 0);
	if(GAUSSIAN_BLUR > 0)
		cvSmooth(saveImage, saveImage, CV_GAUSSIAN, GAUSSIAN_BLUR, GAUSSIAN_BLUR);

	int width = saveImage->width;
	int height = saveImage->height;
	int startX = (width-TEMPLATE_WIDTH)/2;
	int startY = (height-TEMPLATE_HEIGHT)/2;
	float q = TEMPLATE_WIDTH * TEMPLATE_HEIGHT;
	
	IplImage* inputImage = NULL;
	IplImage* resultImage = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
	IplImage* templateImage = cvCreateImage(cvSize(TEMPLATE_WIDTH, TEMPLATE_HEIGHT), IPL_DEPTH_8U, 1);
	IplImage* samplingImage = NULL;

	// set template image
	CvRect rect = cvRect(startX, startY, TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
	cvSetImageROI(saveImage, rect);
	cvCopyImage(saveImage, templateImage);
	cvShowImage("template", templateImage);
	cvResetImageROI(saveImage);
	cvReleaseImage(&saveImage);

	// initial homography
	windage::Matrix3 homography(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0);
	homography._13 = startX;
	homography._23 = startY;
	windage::Matrix3 e = homography;

	// Template based Tracking using Inverse Compositional
	windage::InverseCompositional* ic = new windage::InverseCompositional(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
	ic->AttatchTemplateImage(templateImage);
	ic->SetInitialHomography(e);
	ic->Initialize();

	// homography update stack
	std::vector<windage::Matrix3> homographyList;

	cvWaitKey();

	float sumTime = 0.0;
	int sumIter = 0;
	bool processing = true;
	int k = 0;
	while(processing)
	{
		if(k >= IMAGE_SEQ_COUNT)
			break; // stop before loading a nonexistent frame index

		// load image
		if(inputImage) cvReleaseImage(&inputImage);
		sprintf_s(message, IMAGE_SEQ_FILE_NAME, k);
		inputImage = cvLoadImage(message, 0);
		if(GAUSSIAN_BLUR > 0)
			cvSmooth(inputImage, inputImage, CV_GAUSSIAN, GAUSSIAN_BLUR, GAUSSIAN_BLUR);

		cvCvtColor(inputImage, resultImage, CV_GRAY2BGR);

		// processing
		int64 startTime = cvGetTickCount();
		
		float error = 0.0;
		float delta = 1.0;
		int iter = 0;
		homographyList.clear();
		for(iter=0; iter<MAX_ITERATION; iter++)
		{
			error = ic->UpdateHomography(inputImage, &delta);
			homography = ic->GetHomography();
			samplingImage = ic->GetSamplingImage();

			homographyList.push_back(homography);

//			if(delta < HOMOGRAPHY_DELTA)
//				break;
		}
		int64 endTime = cvGetTickCount();
		k++;
		sumIter+= iter;

		// draw result
		int count = homographyList.size();
		for(int i=0; i<count; i++)
 			DrawResult(resultImage, homographyList[i], CV_RGB(((count-i)/(float)count) * 255.0, (i/(float)count) * 255.0, 0), 1);
 		
		double processingTime = (endTime - startTime)/(cvGetTickFrequency() * 1000.0);
		sprintf_s(message, "%03d >> processing time : %.2lf ms (%02d iter), error : %.2lf", k, processingTime, iter, error);
		std::cout << message << std::endl;
		sumTime += processingTime;

		windage::Utils::DrawTextToImage(resultImage, cvPoint(5, 15), message, 0.6);

		// draw image
		cvShowImage("sampling", samplingImage);
		cvShowImage("result", resultImage);

		char ch = cvWaitKey(1);
		switch(ch)
		{
		case ' ':
			{
				char tempch = cvWaitKey(0);
				if(tempch == 's' || tempch == 'S')
				{
					cvSaveImage("ICAlgorithm.jpg", resultImage);
				}
			}
			break;
		case 'i':
		case 'I':
			k++;
			break;
		case 'o':
		case 'O':
			k-=5;
			if(k < 0) k = 0; // avoid loading a negative frame index
			break;
		case 'q':
		case 'Q':
			processing = false;
			break;
		}

	}

	std::cout << "average iteration : " << sumIter/(float)IMAGE_SEQ_COUNT << std::endl;
	std::cout << "average processing ime : " << sumTime/(float)IMAGE_SEQ_COUNT << " ms" << std::endl;

	cvReleaseImage(&resultImage);
	cvDestroyAllWindows();
	return 0;
}
Example #5
/** 
* @author   JIA Pei
* @version  2010-02-04
* @brief    Detect the face components (eyes, nose, mouth) within a detected face
* @param    iImg        Input - input image, a face
* @param    faceOrient  Input - face orientation
* @param    lefteye     Input - whether to detect left eye
* @param    righteye    Input - whether to detect right eye
* @param    nose        Input - whether to detect nose
* @param    mouth       Input - whether to detect mouth
* @return   detection time cost (ms)
*/
double CFaceDetectionAlgs::VO_FaceComponentsDetection(  const Mat& iImg,
                                                        int faceOrient,
                                                        bool lefteye,
                                                        bool righteye,
                                                        bool nose,
                                                        bool mouth)
{
    double res = (double)cvGetTickCount();

    // Emphasized by JIA Pei, make sure you update m_CVDetectedFaceWindow finally!
    // set the top left half to look for left eye
    if ( lefteye )
    {
        this->m_CVLeftEyePossibleWindow = CFaceDetectionAlgs::VO_SetDetectedFacePartsPossibleWindow(iImg.cols, iImg.rows, VO_FacePart::LEFTEYE, faceOrient);
        this->m_bLeftEyeDetected = this->VO_FacePartDetection ( iImg, this->m_CVLeftEyePossibleWindow, this->m_VOFaceComponents0.m_rectLeftEye, VO_FacePart::LEFTEYE);

        if ( this->m_bLeftEyeDetected )
        {            
            this->m_VOFaceComponents.m_rectLeftEye.x = ( int ) ( this->m_VOFaceComponents0.m_rectLeftEye.x + this->m_CVLeftEyePossibleWindow.x + this->m_CVDetectedFaceWindow2SM.x );
            this->m_VOFaceComponents.m_rectLeftEye.y = ( int ) ( this->m_VOFaceComponents0.m_rectLeftEye.y + this->m_CVLeftEyePossibleWindow.y + this->m_CVDetectedFaceWindow2SM.y );
            this->m_VOFaceComponents.m_rectLeftEye.width = ( int ) ( this->m_VOFaceComponents0.m_rectLeftEye.width );
            this->m_VOFaceComponents.m_rectLeftEye.height = ( int ) ( this->m_VOFaceComponents0.m_rectLeftEye.height );
            this->m_VOFaceComponents0.m_rectLeftEye.x = ( int ) ( this->m_VOFaceComponents0.m_rectLeftEye.x + this->m_CVLeftEyePossibleWindow.x );
            this->m_VOFaceComponents0.m_rectLeftEye.y = ( int ) ( this->m_VOFaceComponents0.m_rectLeftEye.y + this->m_CVLeftEyePossibleWindow.y );

        }
        else
        {
            this->m_VOFaceComponents.m_rectLeftEye.x = ( int ) ( this->m_CVLeftEyePossibleWindow.x + this->m_CVDetectedFaceWindow2SM.x );
            this->m_VOFaceComponents.m_rectLeftEye.y = ( int ) ( this->m_CVLeftEyePossibleWindow.y + this->m_CVDetectedFaceWindow2SM.y );
            this->m_VOFaceComponents.m_rectLeftEye.width = ( int ) ( this->m_CVLeftEyePossibleWindow.width );
            this->m_VOFaceComponents.m_rectLeftEye.height = ( int ) ( this->m_CVLeftEyePossibleWindow.height);
            this->m_VOFaceComponents0.m_rectLeftEye.x = ( int ) ( this->m_CVLeftEyePossibleWindow.x );
            this->m_VOFaceComponents0.m_rectLeftEye.y = ( int ) ( this->m_CVLeftEyePossibleWindow.y );

        }
    }

    // set the top right half to look for right eye
    if ( righteye )
    {
        this->m_CVRightEyePossibleWindow = CFaceDetectionAlgs::VO_SetDetectedFacePartsPossibleWindow(iImg.cols, iImg.rows, VO_FacePart::RIGHTEYE, faceOrient);
        this->m_bRightEyeDetected = this->VO_FacePartDetection ( iImg, this->m_CVRightEyePossibleWindow, this->m_VOFaceComponents0.m_rectRightEye, VO_FacePart::RIGHTEYE );

        if ( this->m_bRightEyeDetected )
        {
            this->m_VOFaceComponents.m_rectRightEye.x = ( int ) ( this->m_VOFaceComponents0.m_rectRightEye.x + this->m_CVRightEyePossibleWindow.x + this->m_CVDetectedFaceWindow2SM.x );
            this->m_VOFaceComponents.m_rectRightEye.y = ( int ) ( this->m_VOFaceComponents0.m_rectRightEye.y + this->m_CVRightEyePossibleWindow.y + this->m_CVDetectedFaceWindow2SM.y );
            this->m_VOFaceComponents.m_rectRightEye.width = ( int ) ( this->m_VOFaceComponents0.m_rectRightEye.width );
            this->m_VOFaceComponents.m_rectRightEye.height = ( int ) ( this->m_VOFaceComponents0.m_rectRightEye.height);
            this->m_VOFaceComponents0.m_rectRightEye.x = ( int ) ( this->m_VOFaceComponents0.m_rectRightEye.x + this->m_CVRightEyePossibleWindow.x );
            this->m_VOFaceComponents0.m_rectRightEye.y = ( int ) ( this->m_VOFaceComponents0.m_rectRightEye.y + this->m_CVRightEyePossibleWindow.y );

        }
        else
        {
            this->m_VOFaceComponents.m_rectRightEye.x = ( int ) ( this->m_CVRightEyePossibleWindow.x + this->m_CVDetectedFaceWindow2SM.x );
            this->m_VOFaceComponents.m_rectRightEye.y = ( int ) ( this->m_CVRightEyePossibleWindow.y + this->m_CVDetectedFaceWindow2SM.y );
            this->m_VOFaceComponents.m_rectRightEye.width = ( int ) ( this->m_CVRightEyePossibleWindow.width );
            this->m_VOFaceComponents.m_rectRightEye.height = ( int ) ( this->m_CVRightEyePossibleWindow.height );
            this->m_VOFaceComponents0.m_rectRightEye.x = ( int ) ( this->m_CVRightEyePossibleWindow.x );
            this->m_VOFaceComponents0.m_rectRightEye.y = ( int ) ( this->m_CVRightEyePossibleWindow.y );

        }
    }

    if ( nose )
    {
        this->m_CVNosePossibleWindow = CFaceDetectionAlgs::VO_SetDetectedFacePartsPossibleWindow(iImg.cols, iImg.rows, VO_FacePart::NOSE, faceOrient);
        this->m_bNoseDetected = this->VO_FacePartDetection ( iImg, this->m_CVNosePossibleWindow, this->m_VOFaceComponents0.m_rectNose, VO_FacePart::NOSE );

        if ( this->m_bNoseDetected )
        {
            this->m_VOFaceComponents.m_rectNose.x = ( int ) ( this->m_VOFaceComponents0.m_rectNose.x + this->m_CVNosePossibleWindow.x + this->m_CVDetectedFaceWindow2SM.x );
            this->m_VOFaceComponents.m_rectNose.y = ( int ) ( this->m_VOFaceComponents0.m_rectNose.y + this->m_CVNosePossibleWindow.y + this->m_CVDetectedFaceWindow2SM.y );
            this->m_VOFaceComponents.m_rectNose.width = ( int ) ( this->m_VOFaceComponents0.m_rectNose.width );
            this->m_VOFaceComponents.m_rectNose.height = ( int ) ( this->m_VOFaceComponents0.m_rectNose.height );
            this->m_VOFaceComponents0.m_rectNose.x = ( int ) ( this->m_VOFaceComponents0.m_rectNose.x + this->m_CVNosePossibleWindow.x );
            this->m_VOFaceComponents0.m_rectNose.y = ( int ) ( this->m_VOFaceComponents0.m_rectNose.y + this->m_CVNosePossibleWindow.y );

        }
        else
        {
            this->m_VOFaceComponents.m_rectNose.x = ( int ) ( this->m_CVNosePossibleWindow.x + this->m_CVDetectedFaceWindow2SM.x );
            this->m_VOFaceComponents.m_rectNose.y = ( int ) ( this->m_CVNosePossibleWindow.y + this->m_CVDetectedFaceWindow2SM.y );
            this->m_VOFaceComponents.m_rectNose.width = ( int ) ( this->m_CVNosePossibleWindow.width );
            this->m_VOFaceComponents.m_rectNose.height = ( int ) ( this->m_CVNosePossibleWindow.height );
            this->m_VOFaceComponents0.m_rectNose.x = ( int ) ( this->m_CVNosePossibleWindow.x );
            this->m_VOFaceComponents0.m_rectNose.y = ( int ) ( this->m_CVNosePossibleWindow.y );

        }
    }

    if ( mouth )
    {
        this->m_CVMouthPossibleWindow = CFaceDetectionAlgs::VO_SetDetectedFacePartsPossibleWindow(iImg.cols, iImg.rows, VO_FacePart::LIPOUTERLINE, faceOrient);
        this->m_bMouthDetected = this->VO_FacePartDetection ( iImg, this->m_CVMouthPossibleWindow, this->m_VOFaceComponents0.m_rectMouth, VO_FacePart::LIPOUTERLINE );

        if ( this->m_bMouthDetected )
        {
            this->m_VOFaceComponents0.m_rectMouth.x += this->m_VOFaceComponents0.m_rectMouth.width*0.1;
            // ensure this->m_VOFaceComponents.m_rectMouth.x + this->m_VOFaceComponents.m_rectMouth.width < iImg.cols-1
            while (this->m_VOFaceComponents0.m_rectMouth.x + this->m_VOFaceComponents0.m_rectMouth.width >= iImg.cols-1)
            {
                this->m_VOFaceComponents0.m_rectMouth.x--;
            }

            this->m_VOFaceComponents.m_rectMouth.x = ( int ) ( this->m_VOFaceComponents0.m_rectMouth.x + this->m_CVMouthPossibleWindow.x + this->m_CVDetectedFaceWindow2SM.x );
            this->m_VOFaceComponents.m_rectMouth.y = ( int ) ( this->m_VOFaceComponents0.m_rectMouth.y + this->m_CVMouthPossibleWindow.y + this->m_CVDetectedFaceWindow2SM.y );
            this->m_VOFaceComponents.m_rectMouth.width = ( int ) ( this->m_VOFaceComponents0.m_rectMouth.width );
            this->m_VOFaceComponents.m_rectMouth.height = ( int ) ( this->m_VOFaceComponents0.m_rectMouth.height);
            this->m_VOFaceComponents0.m_rectMouth.x = ( int ) ( this->m_VOFaceComponents0.m_rectMouth.x + this->m_CVMouthPossibleWindow.x );
            this->m_VOFaceComponents0.m_rectMouth.y = ( int ) ( this->m_VOFaceComponents0.m_rectMouth.y + this->m_CVMouthPossibleWindow.y );

            // For the mouth we need a modification, because the mouth is often
            // not detected well; a small adjustment is required.
            this->m_VOFaceComponents0.m_rectMouth.y -= this->m_VOFaceComponents0.m_rectMouth.height*0.2;
            this->m_VOFaceComponents.m_rectMouth.y -= this->m_VOFaceComponents.m_rectMouth.height*0.2;
            // ensure this->m_VOFaceComponents.m_rectMouth.y > 0
            while (this->m_VOFaceComponents0.m_rectMouth.y <= 0)
            {
                this->m_VOFaceComponents0.m_rectMouth.y++;
            }
            while (this->m_VOFaceComponents.m_rectMouth.y <= 0)
            {
                this->m_VOFaceComponents.m_rectMouth.y++;
            }
        }
        else
        {
            this->m_VOFaceComponents.m_rectMouth.x = ( int ) ( this->m_CVMouthPossibleWindow.x + this->m_CVDetectedFaceWindow2SM.x );
            this->m_VOFaceComponents.m_rectMouth.y = ( int ) ( this->m_CVMouthPossibleWindow.y + this->m_CVDetectedFaceWindow2SM.y );
            this->m_VOFaceComponents.m_rectMouth.width = ( int ) ( this->m_CVMouthPossibleWindow.width );
            this->m_VOFaceComponents.m_rectMouth.height = ( int ) ( this->m_CVMouthPossibleWindow.height );
            this->m_VOFaceComponents0.m_rectMouth.x = ( int ) ( this->m_CVMouthPossibleWindow.x );
            this->m_VOFaceComponents0.m_rectMouth.y = ( int ) ( this->m_CVMouthPossibleWindow.y );

        }
    }

    // Default logical relationship among all of the above 4 face parts.
    // According to my observation, the mouth sometimes goes wrong, and the
    // nose sometimes goes wrong in the x direction while its y direction stays correct.
    if ( this->m_bNoseDetected && this->m_bMouthDetected ) // mouth must be lower than nose
    {
        if ( this->m_VOFaceComponents.m_rectMouth.y+1.0f*this->m_VOFaceComponents.m_rectMouth.height/2.0f <= ( this->m_VOFaceComponents.m_rectNose.y + this->m_VOFaceComponents.m_rectNose.height ) )
            this->m_VOFaceComponents.m_rectMouth.y += this->m_VOFaceComponents.m_rectMouth.height;
        if ( this->m_VOFaceComponents0.m_rectMouth.y+1.0f*this->m_VOFaceComponents0.m_rectMouth.height/2.0f <= ( this->m_VOFaceComponents0.m_rectNose.y + this->m_VOFaceComponents0.m_rectNose.height ) )
            this->m_VOFaceComponents0.m_rectMouth.y += this->m_VOFaceComponents0.m_rectMouth.height;
    }

    res = ((double)cvGetTickCount() - res)
        / ((double)cvGetTickFrequency()*1000.);
    return res;
}
Example #6
/* ///////////////////// chess_corner_test ///////////////////////// */
void CV_ChessboardDetectorTimingTest::run( int start_from )
{
    int code = cvtest::TS::OK;

    /* test parameters */
    std::string   filepath;
    std::string   filename;

    CvMat*  _v = 0;
    CvPoint2D32f* v;

    IplImage img;
    IplImage* gray = 0;
    IplImage* thresh = 0;

    int  idx, max_idx;
    int  progress = 0;

    filepath = cv::format("%scv/cameracalibration/", ts->get_data_path().c_str() );
    filename = cv::format("%schessboard_timing_list.dat", filepath.c_str() );
    CvFileStorage* fs = cvOpenFileStorage( filename.c_str(), 0, CV_STORAGE_READ );
    CvFileNode* board_list = fs ? cvGetFileNodeByName( fs, 0, "boards" ) : 0;

    if( !fs || !board_list || !CV_NODE_IS_SEQ(board_list->tag) ||
        board_list->data.seq->total % 4 != 0 )
    {
        ts->printf( cvtest::TS::LOG, "chessboard_timing_list.dat can not be readed or is not valid" );
        code = cvtest::TS::FAIL_MISSING_TEST_DATA;
        goto _exit_;
    }

    max_idx = board_list->data.seq->total/4;

    for( idx = start_from; idx < max_idx; idx++ )
    {
        int count0 = -1;
        int count = 0;
        CvSize pattern_size;
        int result, result1 = 0;

        const char* imgname = cvReadString((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4), "dummy.txt");
        int is_chessboard = cvReadInt((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4+1), 0);
        pattern_size.width = cvReadInt((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4 + 2), -1);
        pattern_size.height = cvReadInt((CvFileNode*)cvGetSeqElem(board_list->data.seq,idx*4 + 3), -1);

        ts->update_context( this, idx-1, true );

        /* read the image */
        filename = cv::format("%s%s", filepath.c_str(), imgname );

        cv::Mat img2 = cv::imread( filename );
        img = img2;

        if( img2.empty() )
        {
            ts->printf( cvtest::TS::LOG, "one of chessboard images can't be read: %s\n", filename.c_str() );
            code = cvtest::TS::FAIL_MISSING_TEST_DATA;
            continue;
        }

        ts->printf(cvtest::TS::LOG, "%s: chessboard %d:\n", imgname, is_chessboard);

        gray = cvCreateImage( cvSize( img.width, img.height ), IPL_DEPTH_8U, 1 );
        thresh = cvCreateImage( cvSize( img.width, img.height ), IPL_DEPTH_8U, 1 );
        cvCvtColor( &img, gray, CV_BGR2GRAY );


        count0 = pattern_size.width*pattern_size.height;

        /* allocate additional buffers */
        _v = cvCreateMat(1, count0, CV_32FC2);
        count = count0;

        v = (CvPoint2D32f*)_v->data.fl;

        int64 _time0 = cvGetTickCount();
        result = cvCheckChessboard(gray, pattern_size);
        int64 _time01 = cvGetTickCount();

        OPENCV_CALL( result1 = cvFindChessboardCorners(
                 gray, pattern_size, v, &count, 15 ));
        int64 _time1 = cvGetTickCount();

        if( result != is_chessboard )
        {
            ts->printf( cvtest::TS::LOG, "Error: chessboard was %sdetected in the image %s\n",
                       result ? "" : "not ", imgname );
            code = cvtest::TS::FAIL_INVALID_OUTPUT;
            goto _exit_;
        }
        if(result != result1)
        {
            ts->printf( cvtest::TS::LOG, "Warning: results differ cvCheckChessboard %d, cvFindChessboardCorners %d\n",
                       result, result1);
        }

        int num_pixels = gray->width*gray->height;
        float check_chessboard_time = float(_time01 - _time0)/(float)cvGetTickFrequency(); // in us
        ts->printf(cvtest::TS::LOG, "    cvCheckChessboard time s: %f, us per pixel: %f\n",
                   check_chessboard_time*1e-6, check_chessboard_time/num_pixels);

        float find_chessboard_time = float(_time1 - _time01)/(float)cvGetTickFrequency();
        ts->printf(cvtest::TS::LOG, "    cvFindChessboard time s: %f, us per pixel: %f\n",
                   find_chessboard_time*1e-6, find_chessboard_time/num_pixels);

        cvReleaseMat( &_v );
        cvReleaseImage( &gray );
        cvReleaseImage( &thresh );
        progress = update_progress( progress, idx-1, max_idx, 0 );
    }

_exit_:

    /* release occupied memory */
    cvReleaseMat( &_v );
    cvReleaseFileStorage( &fs );
    cvReleaseImage( &gray );
    cvReleaseImage( &thresh );

    if( code < 0 )
        ts->set_failed_test_info( code );
}
Example #7
/** 
 * @author     	JIA Pei
 * @version    	2009-10-04
 * @brief      	Object Tracking
 * @param      	img     		Input - image to be searched within
 * @param		objs			Input - object to be tracked
 * @param		smallSize		Input - smallSize
 * @param		bigSize			Input - bigSize
 * @return		detection time cost
*/
double CTrackingAlgs::Tracking(const cv::Mat& img,
                               cv::Rect& obj,
                               cv::Size smallSize,
                               cv::Size bigSize)
{
	double res = (double)cvGetTickCount();

	if(this->m_bTrackerInitialized)
	{
		switch(this->m_iTrackingMethod)
		{
		case CAMSHIFT:
			{
				CTrackingAlgs::CamshiftTracking(img,
												this->m_hist,
												obj,
												this->m_bObjectTracked,
												smallSize,
												bigSize);
				this->m_CVTrackedObjectRect = obj;
			}
			break;
		case KALMANFILTER:
			{
				CTrackingAlgs::KalmanTracking(	img,
												obj,
												this->m_bObjectTracked,
												smallSize,
												bigSize);
				this->m_CVTrackedObjectRect = obj;
			}
			break;
		case PARTICLEFILTER:
			{
				CTrackingAlgs::ParticleFilterTracking(	img,
														obj,
														this->m_bObjectTracked,
														smallSize,
														bigSize);
				this->m_CVTrackedObjectRect = obj;
			}
			break;
		case ASMAAM:
			{
				CTrackingAlgs::ASMAAMTracking(	img,
												obj,
												this->m_bObjectTracked,
												smallSize,
												bigSize);
				this->m_CVTrackedObjectRect = obj;
			}
			break;
		case NONE:
		default:
			{
				this->m_bObjectTracked 		= false;
				this->m_bTrackerInitialized	= false;
			}
			break;
		}
	}
	else
	{
		this->UpdateTracker(img, obj);
	}

	res = ((double)cvGetTickCount() - res) / ((double)cvGetTickFrequency()*1000.);
	return res;
}
Example #8
    void update(double time,
                uint32_t* out,
                const uint32_t* in)
    {
        if (!cascade) {
            cvSetNumThreads(cvRound(threads * 100));
            if (classifier.length() > 0) {
                cascade = (CvHaarClassifierCascade*) cvLoad(classifier.c_str(), 0, 0, 0 );
                if (!cascade)
                    fprintf(stderr, "ERROR: Could not load classifier cascade %s\n", classifier.c_str());
                storage = cvCreateMemStorage(0);
            }
            else {
                memcpy(out, in, size * 4);
                return;
            }
        }

        // sanitize parameters
        search_scale = CLAMP(search_scale, 0.11, 1.0);
        neighbors = CLAMP(neighbors, 0.01, 1.0);

        // copy input image to OpenCV
        if( !image )
            image = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 4);
        memcpy(image->imageData, in, size * 4);
        
        // only re-detect periodically to control performance and reduce shape jitter
        int recheckInt = abs(cvRound(recheck * 1000));
        if ( recheckInt > 0 && count % recheckInt )
        {
            // skip detect
            count++;
//            fprintf(stderr, "draw-only counter %u\n", count);
        }
        else
        {
            count = 1;   // reset the recheck counter
            if (objects) // reset the list of objects
                cvClearSeq(objects);
            
            double elapsed = (double) cvGetTickCount();

            objects = detect();

            // use detection time to throttle frequency of re-detect vs. redraw (automatic recheck)
            elapsed = cvGetTickCount() - elapsed;
            elapsed = elapsed / ((double) cvGetTickFrequency() * 1000.0);

            // Automatic recheck uses an undocumented negative parameter value,
            // which does not conform to the parameter spec but works in practice.
            if (recheck < 0 && cvRound( elapsed / (1000.0 / (recheckInt + 1)) ) <= recheckInt)
                    count += recheckInt - cvRound( elapsed / (1000.0 / (recheckInt + 1)));
//            fprintf(stderr, "detection time = %gms counter %u\n", elapsed, count);
        }
        
        draw();
        
        // copy filtered OpenCV image to output
        memcpy(out, image->imageData, size * 4);
        cvReleaseImage(&image);
    }
Example #9
int main( int argc, char** argv )
{
    char* filename = argc >= 2 ? argv[1] : (char*)"fruits.jpg";
    CvRNG rng = cvRNG(-1);

    if( (img0 = cvLoadImage(filename,1)) == 0 )
        return 0;

    printf( "Hot keys: \n"
            "\tESC - quit the program\n"
            "\tr - restore the original image\n"
            "\tw or SPACE - run watershed algorithm\n"
            "\t\t(before running it, roughly mark the areas on the image)\n"
            "\t  (before that, roughly outline several markers on the image)\n" );
    
    cvNamedWindow( "image", 1 );
    cvNamedWindow( "watershed transform", 1 );

    img = cvCloneImage( img0 );
    img_gray = cvCloneImage( img0 );
    wshed = cvCloneImage( img0 );
    marker_mask = cvCreateImage( cvGetSize(img), 8, 1 );
    markers = cvCreateImage( cvGetSize(img), IPL_DEPTH_32S, 1 );
    cvCvtColor( img, marker_mask, CV_BGR2GRAY );
    cvCvtColor( marker_mask, img_gray, CV_GRAY2BGR );

    cvZero( marker_mask );
    cvZero( wshed );
    cvShowImage( "image", img );
    cvShowImage( "watershed transform", wshed );
    cvSetMouseCallback( "image", on_mouse, 0 );

    for(;;)
    {
        int c = cvWaitKey(0);

        if( (char)c == 27 )
            break;

        if( (char)c == 'r' )
        {
            cvZero( marker_mask );
            cvCopy( img0, img );
            cvShowImage( "image", img );
        }

        if( (char)c == 'w' || (char)c == ' ' )
        {
            CvMemStorage* storage = cvCreateMemStorage(0);
            CvSeq* contours = 0;
            CvMat* color_tab;
            int i, j, comp_count = 0;
            //cvSaveImage( "wshed_mask.png", marker_mask );
            //marker_mask = cvLoadImage( "wshed_mask.png", 0 );
            cvFindContours( marker_mask, storage, &contours, sizeof(CvContour),
                            CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
            cvZero( markers );
            for( ; contours != 0; contours = contours->h_next, comp_count++ )
            {
                cvDrawContours( markers, contours, cvScalarAll(comp_count+1),
                                cvScalarAll(comp_count+1), -1, -1, 8, cvPoint(0,0) );
            }

            color_tab = cvCreateMat( 1, comp_count, CV_8UC3 );
            for( i = 0; i < comp_count; i++ )
            {
                uchar* ptr = color_tab->data.ptr + i*3;
                ptr[0] = (uchar)(cvRandInt(&rng)%180 + 50);
                ptr[1] = (uchar)(cvRandInt(&rng)%180 + 50);
                ptr[2] = (uchar)(cvRandInt(&rng)%180 + 50);
            }

            {
            double t = (double)cvGetTickCount();
            cvWatershed( img0, markers );
            t = (double)cvGetTickCount() - t;
            printf( "exec time = %gms\n", t/(cvGetTickFrequency()*1000.) );
            }

            // paint the watershed image
            for( i = 0; i < markers->height; i++ )
                for( j = 0; j < markers->width; j++ )
                {
                    int idx = CV_IMAGE_ELEM( markers, int, i, j );
                    uchar* dst = &CV_IMAGE_ELEM( wshed, uchar, i, j*3 );
                    if( idx == -1 )
                        dst[0] = dst[1] = dst[2] = (uchar)255;
                    else if( idx <= 0 || idx > comp_count )
                        dst[0] = dst[1] = dst[2] = (uchar)0; // should not get here
                    else
                    {
                        uchar* ptr = color_tab->data.ptr + (idx-1)*3;
                        dst[0] = ptr[0]; dst[1] = ptr[1]; dst[2] = ptr[2];
                    }
                }

            cvAddWeighted( wshed, 0.5, img_gray, 0.5, 0, wshed );
            cvShowImage( "watershed transform", wshed );
            cvReleaseMemStorage( &storage );
            cvReleaseMat( &color_tab );
        }
    }

    return 0;
}
Example #10
Mat HUD(Mat videoFeed, int sizex, int sizey) {
	// Read HUD image
	Mat image;
	if (gameOn && !peoplePicked && !cratePicked)
		image = imread("..\\..\\src\\resource\\hudg.png", -1);
	else if (gameOn && !peoplePicked && cratePicked)
		image = imread("..\\..\\src\\resource\\hudc.png", -1);
	else if (gameOn && peoplePicked && !cratePicked)
		image = imread("..\\..\\src\\resource\\hudp.png", -1);
	else if (gameOn && peoplePicked && cratePicked)
		image = imread("..\\..\\src\\resource\\hudpc.png", -1);
	else
		image = imread("..\\..\\src\\resource\\hud.png", -1);

	// Create a buffer of the image
	Mat buffer;
	videoFeed.copyTo(buffer);

	// Resized HUD image
	Mat rImage;

	// Resize HUD to fit window
	resize(image, rImage, Size(sizex, sizey), 0, 0, INTER_CUBIC);

	// Create a matrix to mix the video feed with the HUD image
	Mat result;

	// Overlay the HUD over the video feed
	OverlayImage(videoFeed, rImage, result, Point(0, 0));


	// Draw the crosshair
	Point2f point(sizex / 2, sizey / 2);
	circle(buffer, point, 10, Scalar(255, 255, 0), -1);


	// Display info on to HUD
	ostringstream str; // string stream
	ostringstream str2; // string stream
	ostringstream str3; // string stream
	ostringstream str4; // string stream
	ostringstream str5; // string stream

	// Absolute control flag
	str << "Absolute control : " << absoluteControl;
	putText(result, str.str(), Point(10, 90), CV_FONT_HERSHEY_PLAIN, 1.2, CV_RGB(0, 250, 0));

	// Battery
	str2 << ardrone.getBatteryPercentage();
	putText(result, str2.str(), Point(180, 33), CV_FONT_HERSHEY_PLAIN, 2, CV_RGB(0, 250, 0), 2);

	// Altitude
	str3 << ardrone.getAltitude();
	putText(result, str3.str(), Point(440, 33), CV_FONT_HERSHEY_PLAIN, 2, CV_RGB(0, 250, 0), 2);

	// Show game info when game starts
	if (gameOn) {
		// Points
		str4 << points;
		putText(result, str4.str(), Point(WIDTH / 2.1, 45), CV_FONT_HERSHEY_PLAIN, 1, CV_RGB(0, 250, 0), 2);

		// Time
		int64 now = cvGetTickCount(); // cvGetTickCount() returns int64; an int would truncate
		int passedSinceStart = (int)(((now - gameTimeStart) / (cvGetTickFrequency() * 1000)) / 1000); // ticks -> ms -> s
		gameTimeLeft = TIME_LIMIT - passedSinceStart;
		str5 << gameTimeLeft;
		if (isDroneConnected)
			putText(result, str5.str(), Point(WIDTH / 2.3, HEIGHT - 25), CV_FONT_HERSHEY_PLAIN, 2, CV_RGB(0, 250, 0), 2);
		else
			putText(result, str5.str(), Point(WIDTH / 2.2, WEBCAM_HEIGHT - 20), CV_FONT_HERSHEY_PLAIN, 2, CV_RGB(0, 250, 0), 2);
	}


	// Combine buffer with original image + opacity
	double opacity = 0.2;
	addWeighted(buffer, opacity, result, 1 - opacity, 0, result);

	return result;
}
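
The timer arithmetic above divides the tick difference by (frequency * 1000) to get milliseconds, then by 1000 again to get whole seconds. An equivalent one-step sketch, reusing the example's variables for illustration:

// Seconds elapsed since gameTimeStart, using the C-API convention that
// cvGetTickFrequency() returns ticks per microsecond:
double secondsSinceStart = (double)(now - gameTimeStart) / (cvGetTickFrequency() * 1e6);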
Example #11
int main( int argc, char** argv )
{
    char* filename = argc >= 2 ? argv[1] : (char*)"fruits.jpg";
    CvTermCriteria termcrit = cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 5, 1);
    double spRadius = 20.0, colorRadius = 40.0;
    int max_level = 2;

    if( (src = cvLoadImage(filename,-1)) == 0 )
        return 0;

	help();   
    cvNamedWindow( "Image", 1 );
	cvNamedWindow( "Segmented", 1);

    dst = cvCloneImage( src );
    cvShowImage( "Image", src );

    for(;;)
    {
        char c;
		do { // In Linux, we discard shift codes, which turn c negative.
			c = cvWaitKey(0);
			if(c < 0) c = -c;
		} while( (c == 30) || (c == 31)); //Shift codes
//		printf("c=%d\n",c);

        if(( (char)c == 27) || ((char)c == 'q') || ((char)c == 'Q') )//Quit
            break;

		switch(c){
			case 'H':
			case 'h':
				help();
				break;
			case 's': // save input/output images as pyrMeanShift*.jpg
				cvSaveImage("pyrMeanShiftInput.jpg",src);
				cvSaveImage("pyrMeanShiftOutput.jpg",dst);
				printf("Saved images pyrMeanShift*.jpg\n");
				break;
			case 'm':
				max_level -= 1;
				if(max_level <0) max_level = 0;
				printf("max_level = %d\n",max_level);
				break;
			case 'M':
				max_level += 1;
				printf("max_level = %d\n",max_level);
				break;
			case 'r':
				spRadius -= 1.0;
				if(spRadius < 1.0) spRadius = 1.0;
				printf("spRadius = %lf\n",spRadius);
				break;
			case 'R':
				spRadius += 1.0;
				printf("spRadius = %lf\n",spRadius);
				break;
			case 'c':
				colorRadius -= 1.0;
				if(colorRadius < 1.0) colorRadius = 1.0;
				printf("colorRadius = %lf\n",colorRadius);
				break;
			case 'C':
				colorRadius += 1.0;
				printf("colorRadius = %lf\n",colorRadius);
				break;
			case 'p':
			case '\n':
			{
				double t = (double)cvGetTickCount();
				cvPyrMeanShiftFiltering( src, dst, spRadius, colorRadius, max_level, termcrit);
				t = (double)cvGetTickCount() - t;
				printf("For params image(x,y)=(%d,%d) max_level=%d, Radius color=%lf, space=%lf\n",
						src->width,src->height,max_level,colorRadius,spRadius);
				printf( "exec time = %gms\n", t/(cvGetTickFrequency()*1000.) );
				cvShowImage( "Segmented", dst );
				break;
			}
		}
    }

    return 1;
}
Example #12
void show_detecttime(double t )
{
	t = (double)cvGetTickCount() - t;
    printf( "detection time = %gms\n", t/((double)cvGetTickFrequency()*1000.) );
}
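
Note that show_detecttime() takes the starting tick value rather than an elapsed time, and performs the subtraction itself. A hypothetical call site, for illustration only:

// Capture the tick count before the timed operation, then pass it to
// show_detecttime(), which subtracts it from the current tick count and
// converts the difference to milliseconds.
double t = (double)cvGetTickCount();
run_detector();   // hypothetical stand-in for the detection call being timed
show_detecttime(t);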
Example #13
/**
 * @author      JIA Pei
 * @version     2010-05-20
 * @brief       CMU ICIA AAM Fitting, for dynamic image sequence
 * @param       iImg            Input - image to be fitted
 * @param       ioShape         Input and Output - the fitted shape
 * @param       oImg            Output - the fitted image
 * @param       epoch           Input - the iteration epoch
*/
float VO_FittingAAMInverseIA::VO_ICIAAAMFitting(const Mat& iImg,
                                                VO_Shape& ioShape,
                                                Mat& oImg,
                                                unsigned int epoch)
{
    this->m_VOFittingShape.clone(ioShape);
    this->m_VOEstimatedShape.clone(this->m_VOFittingShape);
    double t = (double)cvGetTickCount();

    this->SetProcessingImage(iImg, this->m_VOAAMInverseIA);
    this->m_iIteration = 0;

    // Get m_MatCurrentP and m_MatCurrentQ
    this->m_VOAAMInverseIA->VO_CalcAllParams4AnyShapeWithConstrain( this->m_VOFittingShape,
                                                                    this->m_MatCurrentP,
                                                                    this->m_fScale,
                                                                    this->m_vRotateAngles,
                                                                    this->m_MatCenterOfGravity);
    this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);

    this->m_MatDeltaP       = Mat_<float>::zeros(this->m_MatDeltaP.size());
    this->m_MatDeltaQ       = Mat_<float>::zeros(this->m_MatDeltaQ.size());
    this->m_MatCurrentQ     = Mat_<float>::zeros(this->m_MatCurrentQ.size());
    this->m_MatDeltaPQ      = Mat_<float>::zeros(this->m_MatDeltaPQ.size());

    // Step (1) Warp I with W(x;p) followed by N(x;q) to compute I(N(W(x;p);q))
    this->VO_PParamQParam2FittingShape( this->m_MatCurrentP,
                                        this->m_MatCurrentQ,
                                        this->m_VOFittingShape,
                                        this->m_fScale,
                                        this->m_vRotateAngles,
                                        this->m_MatCenterOfGravity );
    this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);

    // Step (2) Compute the error image I(N(W(x;p);q))-A0(x)
    this->m_E_previous = this->m_E = this->VO_CalcErrorImage(this->m_ImageProcessing,
                                                            this->m_VOFittingShape,
                                                            this->m_VOTemplateNormalizedTexture,
                                                            this->m_VOTextureError);

    do
    {
        ++this->m_iIteration;

        // Step (7) -- a bit modification
        cv::gemm(this->m_VOTextureError.GetTheTextureInARow(), this->m_VOAAMInverseIA->m_MatICIAPreMatrix, -1, Mat(), 0, this->m_MatDeltaPQ, GEMM_2_T);

        // Step (8) -- a bit modification. Get DeltaP DeltaQ respectively
        this->m_MatDeltaQ = this->m_MatDeltaPQ(Rect( 0, 0, this->m_MatDeltaQ.cols, 1));
        this->m_MatDeltaP = this->m_MatDeltaPQ(Rect( this->m_MatDeltaQ.cols, 0, this->m_MatDeltaP.cols, 1));

        // Step (9) -- CMU Inverse Compositional
        this->VO_CMUInverseCompositional( this->m_MatDeltaP, this->m_MatDeltaQ, this->m_VOFittingShape, this->m_VOEstimatedShape );
        
        // Ensure Inverse Compositional still satisfies global shape constraints
        this->m_VOAAMInverseIA->VO_CalcAllParams4AnyShapeWithConstrain( this->m_VOEstimatedShape,
                                                                        this->m_MatEstimatedP,
                                                                        this->m_fScale,
                                                                        this->m_vRotateAngles,
                                                                        this->m_MatCenterOfGravity);
        this->m_VOEstimatedShape.ConstrainShapeInImage(this->m_ImageProcessing);

        this->m_E = this->VO_CalcErrorImage(this->m_ImageProcessing,
                                            this->m_VOEstimatedShape,
                                            this->m_VOTemplateNormalizedTexture,
                                            this->m_VOEstimatedTextureError);

        if (this->m_E < this->m_E_previous)
        {
            // Unlike in Basic AAM: since m_fScale, m_vRotateAngles and
            // m_MatCenterOfGravity have not been updated in ICIA,
            // m_MatCurrentQ should not be reset to 0 here!
//            this->m_MatCurrentQ = Mat_<float>::zeros(this->m_MatCurrentQ.size());
            this->m_VOFittingShape.clone(this->m_VOEstimatedShape);
            this->m_VOAAMInverseIA->VO_CalcAllParams4AnyShapeWithConstrain( this->m_VOFittingShape,
                                                                            this->m_MatCurrentP,
                                                                            this->m_fScale,
                                                                            this->m_vRotateAngles,
                                                                            this->m_MatCenterOfGravity);
            this->m_VOFittingShape.ConstrainShapeInImage(this->m_ImageProcessing);
            this->m_VOTextureError.clone(this->m_VOEstimatedTextureError);
            this->m_E_previous = this->m_E;

        }
        else
            break;

    }while( ( fabs(this->m_E) > FLT_EPSILON ) && ( this->m_iIteration < epoch ) );

    VO_Fitting2DSM::VO_DrawMesh(this->m_VOFittingShape, this->m_VOAAMInverseIA, oImg);

    // Finally, recalculate all parameters; this step is optional.
    this->m_VOAAMInverseIA->VO_CalcAllParams4AnyShapeWithConstrain( this->m_VOFittingShape,
                                                                    this->m_MatCurrentP,
                                                                    this->m_fScale,
                                                                    this->m_vRotateAngles,
                                                                    this->m_MatCenterOfGravity );

    // Step (10) (Option step), Post-computation. Get m_MatModelNormalizedTextureParam
    VO_TextureModel::VO_LoadOneTextureFromShape(this->m_VOFittingShape,
                                                this->m_ImageProcessing,
                                                this->m_vTriangle2D,
                                                this->m_vPointWarpInfo,
                                                this->m_VOFittingTexture );
    // estimate the texture model parameters
    this->m_VOAAMInverseIA->VO_CalcAllParams4AnyTexture(this->m_VOFittingTexture, this->m_MatModelNormalizedTextureParam);
    ioShape.clone(this->m_VOFittingShape);

    t = ((double)cvGetTickCount() - t) / (cvGetTickFrequency()*1000.);
    cout << "ICIA AAM fitting time cost: " << t << " millisec" << endl;
    this->m_fFittingTime = t;

    return t;
}
Example #14
int main(int argc, char **argv)
{
    // Initialize ROS
    ros::init(argc, argv, "ic2020_vodom");
    ros::NodeHandle n;   

    ros::Subscriber surf_sub = n.subscribe("/surf/keyframes", 5, optflowCallback);
    ros::Publisher vodom_pub = n.advertise<ic2020_vodom::keyframe>("/vodom/keyframes", 100);    

    // Wait for video streams to be up
    BlockWhileWaitingForVideo();  

    // Create display images
	view_im = cvCreateImage( cvSize(2*newArrival->width, newArrival->height), 8, IPL_PXL_BYTES );

	#ifdef VISUALIZE
	cvNamedWindow("VisualOdom", CV_WINDOW_AUTOSIZE);
	#endif

    // Main loop
    printf("Entering main loop\n");

    while (ros::ok())
    {
        char c = cvWaitKey(5);
        if (c == 'Q' || c == 'q')
            break; 

        // Get Images
        ros::spinOnce();

        // Check if new keyframe is available
        if (newArrival == NULL) { continue; }
        printf ("\33[2J");
        
        // Rotate in new data
        RotateNewArrivalIn();
        
        /**********************************
            Check we have two keyframes
        ***********************************/
        if (kA == 0 || kB == 0) { continue; }
        
        printf("Keyframe A: %i\n", kA->keyframe_num);
        printf("Keyframe B: %i\n", kB->keyframe_num);
        
        // COPY IMAGE DATA TO DOUBLE SIZE IMAGE
        cvSetImageROI( view_im, cvRect(0, 0, kB->im->width, kB->im->height));
        cvCopy( kB->im, view_im );
        cvSetImageROI( view_im, cvRect(kB->im->width, 0, kA->im->width, kA->im->height));
        cvCopy( kA->im, view_im );
        cvResetImageROI( view_im );

        // DRAW RED CIRCLES ON FEATURES
        for (unsigned int i = 0; i < kB->features.size(); i++) {
            cvCircle(view_im, cvPoint(cvRound(kB->features[i].point2D[0]),
                     cvRound(kB->features[i].point2D[1])), 3.0f, colors[0], 2, 8);
        }
        for (unsigned int i = 0; i < kA->features.size(); i++) {
            cvCircle(view_im, cvPoint(cvRound(kA->features[i].point2D[0]) + kB->im->width, 
                     cvRound(kA->features[i].point2D[1])), 3.0f, colors[0], 2, 8);              
        }
        for (unsigned int i = 0; i < kB->numCorn1; i++) {
            cvCircle(view_im, cvPoint(cvRound(kB->corn1[i].point2D[0]),
                     cvRound(kB->corn1[i].point2D[1])), 3.0f, colors[1], 1, 8);
        }
        for (unsigned int i = 0; i < kA->numCorn2; i++) {
            cvCircle(view_im, cvPoint(cvRound(kA->corn2[i].point2D[0]) + kB->im->width, 
                     cvRound(kA->corn2[i].point2D[1])), 3.0f, colors[1], 1, 8);              
        }
        
        /**********************************
          Initial RANSAC w SURF and STCorn
        ***********************************/
        
        // GET SURF PAIRS
        tt = (double)cvGetTickCount();
        std::vector<unsigned int> pairs;
        SURFHelper::findSURFPairs(&kA->descBuffer, &kB->descBuffer, pairs);
        tt = (double)cvGetTickCount() - tt;
        //printf( "SURF Match Time = %gms\n", tt/(cvGetTickFrequency()*1000.));        
        printf( "Found %i SURF Matches \n", pairs.size()/2);
        
        // RANSAC
        std::vector<unsigned int> filtered_surf_pairs;
        std::vector<unsigned int> filtered_corn_pairs;
        tt = (double)cvGetTickCount();
        if (kA->numCorn2 == kB->numCorn1) {
            if (!VisualOdometry::RANSAC6DFast(&kA->features, &kB->features, &pairs, &filtered_surf_pairs,
                                          &kA->corn2[0], &kB->corn1[0], &kB->status[0], kB->numCorn1, &filtered_corn_pairs,
                                          kB->im->width, kB->im->height, 10, 10, 1)) 
            //if (!VisualOdometry::RANSAC6D(&kA->features, &kB->features, &pairs, &filtered_surf_pairs,
            //                              &kA->corn2[0], &kB->corn1[0], &kB->status[0], kB->numCorn1, &filtered_corn_pairs)) 
            //if (!VisualOdometry::RANSAC6DReproj(&kA->features, &kB->features, &pairs, &filtered_surf_pairs))
            {
                printf("RANSAC MATCHES FEATURE # AREN'T EQUAL OR LESS THAN 7 FEATURES \n");
                continue;
            }
        } else {
            printf("WTF KEYFRAME A's FORWARD ST FEATURES != KEYFRAME B's BACK ST FEATURES \n");
        }
        tt = (double)cvGetTickCount() - tt;
        printf( "RANSAC Time = %gms\n", tt/(cvGetTickFrequency()*1000.));        

        // Create index links from B to A
        std::vector<int> revReferenceA;
        for(unsigned int i = 0; i < (unsigned int)kA->features.size(); i++) { revReferenceA.push_back(-1); }
        for(unsigned int i = 0; i < (unsigned int)filtered_surf_pairs.size()/2; i++ )
        {
            int a = filtered_surf_pairs[2*i+0];
            int b = filtered_surf_pairs[2*i+1]; 
            kB->surfMatches[b] = a;
            revReferenceA[a] = b;
        } 
     
        // Remove Useless SURF Features
        std::vector<feature> tfeatures;
        std::vector<surfdesc> tdescBuffer;
        std::vector<int> tsurfMatches;
        for (unsigned int i = 0; i < kA->features.size(); i++) {
            if (revReferenceA[i] >= 0) { // is being matched in the next frame
                tfeatures.push_back(kA->features[i]);
                tdescBuffer.push_back(kA->descBuffer[i]);
                tsurfMatches.push_back(kA->surfMatches[i]);
                kB->surfMatches[revReferenceA[i]] = tfeatures.size() - 1;
            }
            else if (kA->surfMatches[i] >= 0) { // has a match in the previous frame
                tfeatures.push_back(kA->features[i]);
                tdescBuffer.push_back(kA->descBuffer[i]);
                tsurfMatches.push_back(kA->surfMatches[i]);
            }
        }
        kA->features = tfeatures;
        kA->descBuffer = tdescBuffer;
        kA->surfMatches = tsurfMatches;
     
        // CREATE VECTOR OF MATCHES
        std::vector<feature> matchesA2;
        std::vector<feature> matchesB2;
        // ADD IN SURF MATCHES
        for(unsigned int i = 0; i < (unsigned int)filtered_surf_pairs.size()/2; i++ )
        {
            //int a = filtered_surf_pairs[2*i+0];
            int b = filtered_surf_pairs[2*i+1]; 
            int a = kB->surfMatches[b];
            matchesA2.push_back(kA->features[a]);
            matchesB2.push_back(kB->features[b]);
        } 
     
        // ADD IN CORNER MATCHES
        for(unsigned int i = 0; i < kB->numCorn1; i++ )
        {
            if (filtered_corn_pairs[i] > 0) {
                matchesA2.push_back(kA->corn2[i]);
                matchesB2.push_back(kB->corn1[i]);
            } else {
                kB->status[i] = 0;
            }
        }
        
        // Print Green Circles Over RANSAC Points
        for (unsigned int i = 0; i < matchesA2.size(); i++) {
            float lx = matchesB2[i].point2D[0];
            float ly = matchesB2[i].point2D[1];
            float lx2 = matchesA2[i].point2D[0];
            float ly2 = matchesA2[i].point2D[1];
            cvCircle(view_im,cvPoint(cvRound(lx), cvRound(ly)), 3.0f, colors[3], 2, 8);
            cvCircle(view_im, cvPoint(cvRound(lx2) + kA->im->width, cvRound(ly2)), 3.0f, colors[3], 2, 8);  
            cvLine(view_im, cvPoint(cvRound(lx), cvRound(ly)), cvPoint(cvRound(lx2), cvRound(ly2)), colors[3] );
        }

        // Least Squares
        double rdata[9];
        double translation[3];
        
        VisualOdometry::ArunLeastSquares(&matchesA2, &matchesB2, rdata, translation);      
        
        for (unsigned int i = 0; i < 9; i++) { kB->rotation[i] = rdata[i]; }
        for (unsigned int i = 0; i < 3; i++) { kB->translation[i] = translation[i]; }
        
        /*********************
            ICP
        **********************/
        // Setup Images and Contours
        
	/*
        // Call ICP
        double rdata2[9];
        double translation2[3];
        VisualOdometry::ICPKeyframes(kA, kB, rdata, translation, rdata2, translation2);
        for (unsigned int i = 0; i < 9; i++) { kB->rotation[i] = rdata2[i]; }
        for (unsigned int i = 0; i < 3; i++) { kB->translation[i] = translation2[i];}
	*/
        /*********************
            Publish Frame
        **********************/        
        
        // Print Rotation and Translation
        double pitch = atan2(-rdata[6], sqrt(pow(rdata[0],2.0)+pow(rdata[3],2.0)));
        double yaw = atan2(rdata[3]/cos(pitch), rdata[0]/cos(pitch));
        double roll = atan2(rdata[7]/cos(pitch), rdata[8]/cos(pitch));
        printf("pit yaw rol: %f %f %f\n",pitch,yaw,roll);
        printf("translation: %f %f %f\n",translation[0],translation[1],translation[2]); 
        
        /*double pitch2 = atan2(-rdata2[6], sqrt(pow(rdata2[0],2.0)+pow(rdata2[3],2.0)));
        double yaw2 = atan2(rdata2[3]/cos(pitch), rdata2[0]/cos(pitch));
        double roll2 = atan2(rdata2[7]/cos(pitch), rdata2[8]/cos(pitch));
        printf("icp pit yaw rol: %f %f %f\n",pitch2,yaw2,roll2);
        printf("icp translation: %f %f %f\n",translation2[0],translation2[1],translation2[2]); */

        // Publish Point Clouds
        printf("publishing\n");
        kA->PublishKeyframe(&vodom_pub);
        printf("done publishing\n");

	#ifdef VISUALIZE
        // Show stereo image
        cvShowImage("VisualOdom", view_im);
	#endif
    }

    return 0;
}
void LiveSLAMWrapper::BALoop()
{
    ros::Rate BARate(2000) ;
    list<ImageMeasurement>::iterator iterImage ;
    std::list<visensor_node::visensor_imu>::iterator iterIMU ;
    cv::Mat image0 ;
    cv::Mat image1 ;
    cv::Mat gradientMapForDebug(height, width, CV_8UC3) ;
    sensor_msgs::Image msg;
    double t ;

    while ( nh.ok() )
    {
        monoOdometry->frameInfoList_mtx.lock();
        int ttt = (monoOdometry->frameInfoListTail - monoOdometry->frameInfoListHead);
        if ( ttt < 0 ){
            ttt += frameInfoListSize ;
        }
        //printf("[BA thread] sz=%d\n", ttt ) ;
        if ( ttt < 1 ){
            monoOdometry->frameInfoList_mtx.unlock();
            BARate.sleep() ;
            continue ;
        }
        for ( int sz ; ; )
        {
            monoOdometry->frameInfoListHead++ ;
            if ( monoOdometry->frameInfoListHead >= frameInfoListSize ){
                monoOdometry->frameInfoListHead -= frameInfoListSize ;
            }
            sz = monoOdometry->frameInfoListTail - monoOdometry->frameInfoListHead ;
            if ( sz == 0 ){
                break ;
            }
            if ( monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].keyFrameFlag ){
                break ;
            }
        }
        ros::Time imageTimeStamp = monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].t ;
        monoOdometry->frameInfoList_mtx.unlock();

        //Pop out the image list
        image1_queue_mtx.lock();
        iterImage = image1Buf.begin() ;
        while ( iterImage->t < imageTimeStamp ){
            iterImage = image1Buf.erase( iterImage ) ;
        }
        image1 = iterImage->image.clone();
        image1_queue_mtx.unlock();

        image0_queue_mtx.lock();
        iterImage = image0Buf.begin() ;
        while ( iterImage->t < imageTimeStamp ){
            iterImage = image0Buf.erase( iterImage ) ;
        }
        image0 = iterImage->image.clone();
        image0_queue_mtx.unlock();

        imu_queue_mtx.lock();
        iterIMU = imuQueue.begin() ;
        Vector3d linear_acceleration;
        Vector3d angular_velocity;

        //std::cout << "imageTime=" << imageTimeStamp << std::endl;
        while ( iterIMU->header.stamp < imageTimeStamp )
        {
            linear_acceleration(0) = iterIMU->linear_acceleration.x;
            linear_acceleration(1) = iterIMU->linear_acceleration.y;
            linear_acceleration(2) = iterIMU->linear_acceleration.z;
            angular_velocity(0) = iterIMU->angular_velocity.x;
            angular_velocity(1) = iterIMU->angular_velocity.y;
            angular_velocity(2) = iterIMU->angular_velocity.z;

            //linear_acceleration = -linear_acceleration;
            //angular_velocity = -angular_velocity ;

            double pre_t = iterIMU->header.stamp.toSec();
            iterIMU = imuQueue.erase(iterIMU);
            double next_t = iterIMU->header.stamp.toSec();
            double dt = next_t - pre_t ;

//            std::cout << linear_acceleration.transpose() << std::endl ;
//            std::cout << angular_velocity.transpose() << std::endl ;
            monoOdometry->processIMU( dt, linear_acceleration, angular_velocity );
        }
        imu_queue_mtx.unlock();

        //propagate the last frame info to the current frame
        Frame* lastFrame = monoOdometry->slidingWindow[monoOdometry->tail].get();
        float dt = lastFrame->timeIntegral;

        Vector3d T_bk1_2_b0 = lastFrame->T_bk_2_b0 - 0.5 * gravity_b0 * dt *dt
                + lastFrame->R_bk_2_b0*(lastFrame->v_bk * dt  + lastFrame->alpha_c_k);
        Vector3d v_bk1 = lastFrame->R_k1_k.transpose() *
                (lastFrame->v_bk - lastFrame->R_bk_2_b0.transpose() * gravity_b0 * dt
                 + lastFrame->beta_c_k);
        Matrix3d R_bk1_2_b0 = lastFrame->R_bk_2_b0 * lastFrame->R_k1_k;

        monoOdometry->insertFrame(imageSeqNumber, image1, imageTimeStamp, R_bk1_2_b0, T_bk1_2_b0, v_bk1);
        Frame* currentFrame = monoOdometry->slidingWindow[monoOdometry->tail].get();
        Frame* keyFrame = monoOdometry->currentKeyFrame.get();
        if ( monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].keyFrameFlag )
        {
            //prepare key frame
            cv::Mat disparity, depth ;
            monoOdometry->bm_(image1, image0, disparity, CV_32F);
            calculateDepthImage(disparity, depth, 0.11, fx );
            currentFrame->setDepthFromGroundTruth( (float*)depth.data ) ;

            //pub debugMap
            cv::cvtColor(image1, gradientMapForDebug, CV_GRAY2BGR);
            monoOdometry->generateDubugMap(currentFrame, gradientMapForDebug);
            msg.header.stamp = imageTimeStamp;
            sensor_msgs::fillImage(msg, sensor_msgs::image_encodings::BGR8, height,
                                   width, width*3, gradientMapForDebug.data );
            monoOdometry->pub_gradientMapForDebug.publish(msg) ;

            //set key frame
            monoOdometry->currentKeyFrame = monoOdometry->slidingWindow[monoOdometry->tail] ;
            monoOdometry->currentKeyFrame->keyFrameFlag = true ;
            monoOdometry->currentKeyFrame->cameraLinkList.clear() ;
            //reset the initial guess
            monoOdometry->RefToFrame = Sophus::SE3() ;

            //unlock dense tracking
            monoOdometry->tracking_mtx.lock();
            monoOdometry->lock_densetracking = false;
            monoOdometry->tracking_mtx.unlock();
        }
        if ( monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].trust )
        {
//            cout << "insert camera link" << endl ;
//            cout << monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].T_k_2_c << endl ;

            monoOdometry->insertCameraLink(keyFrame, currentFrame,
                          monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].R_k_2_c,
                          monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].T_k_2_c,
                          monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].lastestATA );
        }

        cout << "[-BA]current Position: " << currentFrame->T_bk_2_b0.transpose() << endl;
        cout << "[-BA]current Velocity: " << currentFrame->v_bk.transpose() << endl;

        //BA
        t = (double)cvGetTickCount()  ;
        monoOdometry->BA();
        printf("BA cost time: %f\n", ((double)cvGetTickCount() - t) / (cvGetTickFrequency() * 1000) );
        t = (double)cvGetTickCount()  ;

        cout << "[BA-]current Position: " << currentFrame->T_bk_2_b0.transpose() << endl;
        cout << "[BA-]current Velocity: " << currentFrame->v_bk.transpose() << endl;

        //marginalization
        monoOdometry->twoWayMarginalize();
        monoOdometry->setNewMarginalzationFlag();

        //    pubOdometry(-T_bk1_2_b0, R_bk1_2_b0, pub_odometry, pub_pose );
        //    pubPath(-T_bk1_2_b0, path_line, pub_path );

        pubOdometry(-monoOdometry->slidingWindow[monoOdometry->tail]->T_bk_2_b0,
                monoOdometry->slidingWindow[monoOdometry->tail]->R_bk_2_b0,
                monoOdometry->pub_odometry, monoOdometry->pub_pose );
        pubPath(-monoOdometry->slidingWindow[monoOdometry->tail]->T_bk_2_b0,
                monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].keyFrameFlag,
                monoOdometry->path_line, monoOdometry->pub_path);
    }
}
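A note on the bookkeeping at the top of the loop: frameInfoListHead and frameInfoListTail index a fixed-size ring buffer, so the occupancy test wraps the difference by frameInfoListSize. The same arithmetic in isolation (a minimal sketch with hypothetical names):

// Number of pending entries in a ring buffer of capacity `size`,
// where `head` and `tail` wrap around modulo `size`.
int ringCount( int head, int tail, int size )
{
    int n = tail - head;
    if( n < 0 )
        n += size;  // tail has already wrapped past the end of the array
    return n;
}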
Example #16
void LiveSLAMWrapper::BALoop()
{
    ros::Rate BARate(2000) ;
    list<ImageMeasurement>::iterator iterImage ;
    std::list<visensor_node::visensor_imu>::iterator iterIMU ;
    cv::Mat image0 ;
    cv::Mat image1 ;
    cv::Mat gradientMapForDebug(height, width, CV_8UC3) ;
    sensor_msgs::Image msg;
    double t ;

    while ( nh.ok() )
    {
        monoOdometry->frameInfoList_mtx.lock();
        int ttt = (monoOdometry->frameInfoListTail - monoOdometry->frameInfoListHead);
        if ( ttt < 0 ){
            ttt += frameInfoListSize ;
        }
        //printf("[BA thread] sz=%d\n", ttt ) ;
        if ( ttt < 1 ){
            monoOdometry->frameInfoList_mtx.unlock();
            BARate.sleep() ;
            continue ;
        }
        for ( int sz ; ; )
        {
            monoOdometry->frameInfoListHead++ ;
            if ( monoOdometry->frameInfoListHead >= frameInfoListSize ){
                monoOdometry->frameInfoListHead -= frameInfoListSize ;
            }
            sz = monoOdometry->frameInfoListTail - monoOdometry->frameInfoListHead ;
            if ( sz == 0 ){
                break ;
            }
            if ( monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].keyFrameFlag ){
                break ;
            }
        }
        ros::Time imageTimeStamp = monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].t ;
        monoOdometry->frameInfoList_mtx.unlock();

        //Pop out the image list
        image1_queue_mtx.lock();
        iterImage = image1Buf.begin() ;
        while ( iterImage->t < imageTimeStamp ){
            iterImage = image1Buf.erase( iterImage ) ;
        }
        image1 = iterImage->image.clone();
        image1_queue_mtx.unlock();

        image0_queue_mtx.lock();
        iterImage = image0Buf.begin() ;
        while ( iterImage->t < imageTimeStamp ){
            iterImage = image0Buf.erase( iterImage ) ;
        }
        image0 = iterImage->image.clone();
        image0_queue_mtx.unlock();

        imu_queue_mtx.lock();
        iterIMU = imuQueue.begin() ;
        Vector3d linear_acceleration;
        Vector3d angular_velocity;

        //std::cout << "imageTime=" << imageTimeStamp << std::endl;
        while ( iterIMU->header.stamp < imageTimeStamp )
        {
            linear_acceleration(0) = iterIMU->linear_acceleration.x;
            linear_acceleration(1) = iterIMU->linear_acceleration.y;
            linear_acceleration(2) = iterIMU->linear_acceleration.z;
            angular_velocity(0) = iterIMU->angular_velocity.x;
            angular_velocity(1) = iterIMU->angular_velocity.y;
            angular_velocity(2) = iterIMU->angular_velocity.z;

//            to_pub_info.x = angular_velocity(0)*180/PI ;
//            to_pub_info.y = angular_velocity(1)*180/PI ;
//            to_pub_info.z = angular_velocity(2)*180/PI ;
//            monoOdometry->pub_angular_velocity.publish( to_pub_info ) ;

//            outFile << to_pub_info.x << " "
//                    << to_pub_info.y << " "
//                    << to_pub_info.z << "\n";


            double pre_t = iterIMU->header.stamp.toSec();
            iterIMU = imuQueue.erase(iterIMU);
            double next_t = iterIMU->header.stamp.toSec();
            double dt = next_t - pre_t ;

//            std::cout << linear_acceleration.transpose() << std::endl ;
//            std::cout << angular_velocity.transpose() << std::endl ;
            monoOdometry->processIMU( dt, linear_acceleration, angular_velocity );
        }
        imu_queue_mtx.unlock();

        //propagate the last frame info to the current frame
        Frame* lastFrame = monoOdometry->slidingWindow[monoOdometry->tail].get();
        float dt = lastFrame->timeIntegral;

        Vector3d T_bk1_2_b0 = lastFrame->T_bk_2_b0 - 0.5 * gravity_b0 * dt *dt
                + lastFrame->R_bk_2_b0*(lastFrame->v_bk * dt  + lastFrame->alpha_c_k);
        Vector3d v_bk1 = lastFrame->R_k1_k.transpose() *
                (lastFrame->v_bk - lastFrame->R_bk_2_b0.transpose() * gravity_b0 * dt
                 + lastFrame->beta_c_k);
        Matrix3d R_bk1_2_b0 = lastFrame->R_bk_2_b0 * lastFrame->R_k1_k;

        monoOdometry->insertFrame(imageSeqNumber, image0, imageTimeStamp, R_bk1_2_b0, T_bk1_2_b0, v_bk1);
        Frame* currentFrame = monoOdometry->slidingWindow[monoOdometry->tail].get();
        Frame* keyFrame = monoOdometry->currentKeyFrame.get();
        if ( monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].keyFrameFlag )
        {
            //prepare key frame
            cv::Mat disparity, depth ;
            monoOdometry->bm_(image0, image1, disparity, CV_32F);
            calculateDepthImage(disparity, depth, 0.11, fx );
            currentFrame->setDepthFromGroundTruth( (float*)depth.data ) ;

#ifdef PRINT_DEBUG_INFO
            //pub debugMap
            cv::cvtColor(image0, gradientMapForDebug, CV_GRAY2BGR);
            monoOdometry->generateDubugMap(currentFrame, gradientMapForDebug);
            msg.header.stamp = imageTimeStamp;
            sensor_msgs::fillImage(msg, sensor_msgs::image_encodings::BGR8, height,
                                   width, width*3, gradientMapForDebug.data );
            monoOdometry->pub_gradientMapForDebug.publish(msg) ;
#endif
            int preKeyFrameID = monoOdometry->currentKeyFrame->id() ;

            //set key frame
            monoOdometry->currentKeyFrame = monoOdometry->slidingWindow[monoOdometry->tail] ;
            monoOdometry->currentKeyFrame->keyFrameFlag = true ;
            monoOdometry->currentKeyFrame->cameraLinkList.clear() ;

            //reset the initial guess
            monoOdometry->RefToFrame = Sophus::SE3() ;

            //update tracking reference
            monoOdometry->updateTrackingReference();

#ifdef PUB_POINT_CLOUD
            pubPointCloud(valid_num, imageTimeStamp, R_vi_2_odometry);
#endif
            //unlock dense tracking
            monoOdometry->tracking_mtx.lock();
            monoOdometry->lock_densetracking = false;
            monoOdometry->tracking_mtx.unlock();

            if ( (imageTimeStamp - lastLoopClorsureTime).toSec() > 0.18 )
            {
                //add possible loop closure link
                t = (double)cvGetTickCount()  ;
                monoOdometry->setReprojectionListRelateToLastestKeyFrame( monoOdometry->head, preKeyFrameID,
                                                                         monoOdometry->slidingWindow[monoOdometry->tail].get() ) ;
                ROS_WARN("loop closure link cost time: %f", ((double)cvGetTickCount() - t) / (cvGetTickFrequency() * 1000) );
                t = (double)cvGetTickCount()  ;
                lastLoopClorsureTime = imageTimeStamp ;
            }
        }

        if ( monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].trust )
        {
//            cout << "insert camera link" << endl ;
//            cout << monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].T_k_2_c << endl ;

            monoOdometry->insertCameraLink(keyFrame, currentFrame,
                          monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].R_k_2_c,
                          -monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].T_k_2_c,
                          monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].lastestATA );
        }

        int control_flag = 0 ;
        Vector3d preBAt = currentFrame->T_bk_2_b0 ;


//        cout << "[-BA]current Position: " << currentFrame->T_bk_2_b0.transpose() << endl;
//        cout << "[-BA]current Velocity: " << currentFrame->v_bk.transpose() << endl;

        //BA
        t = (double)cvGetTickCount()  ;
        monoOdometry->BA();
        printf("BA cost time: %f\n", ((double)cvGetTickCount() - t) / (cvGetTickFrequency() * 1000) );
        t = (double)cvGetTickCount()  ;

        //cout << "[BA-]current Position: " << currentFrame->T_bk_2_b0.transpose() << endl;
        //cout << "[BA-]current Velocity: " << currentFrame->v_bk.transpose() << endl;

        if ( (currentFrame->T_bk_2_b0 - preBAt ).norm() > 0.1 ){
            control_flag = 1 ; //loop_closure or other sudden position change case
        }
        if ( monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].trust == false ){
            control_flag = 2 ; //only IMU link, dense tracking fails
        }

#ifdef PUB_GRAPH
        pubCameraLink();
#endif

        //marginalization
        monoOdometry->twoWayMarginalize();
        monoOdometry->setNewMarginalzationFlag();

        pubOdometry(monoOdometry->slidingWindow[monoOdometry->tail]->T_bk_2_b0,
                monoOdometry->slidingWindow[monoOdometry->tail]->v_bk,
                monoOdometry->slidingWindow[monoOdometry->tail]->R_bk_2_b0,
                monoOdometry->pub_odometry, monoOdometry->pub_pose,
                control_flag, R_vi_2_odometry,
                monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].keyFrameFlag, imageTimeStamp );

#ifdef PRINT_DEBUG_INFO
        int colorFlag = 0 ;
        colorFlag = monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].keyFrameFlag ;
        if (  monoOdometry->frameInfoList[monoOdometry->frameInfoListHead].trust == false ){
            colorFlag = 2 ;
        }
        pubPath(monoOdometry->slidingWindow[monoOdometry->tail]->T_bk_2_b0,
                colorFlag,
                monoOdometry->path_line, monoOdometry->pub_path, R_vi_2_odometry);
#endif

#ifdef  PUB_TF
        static tf::TransformBroadcaster br;
        tf::Transform transform;
        Vector3d t_translation = R_vi_2_odometry * monoOdometry->slidingWindow[monoOdometry->tail]->T_bk_2_b0 ;
        Quaterniond t_q(monoOdometry->slidingWindow[monoOdometry->tail]->R_bk_2_b0) ;
        transform.setOrigin(tf::Vector3(t_translation(0),
                                t_translation(1),
                                t_translation(2)) );
        tf::Quaternion q;
        Quaterniond tt_q(R_vi_2_odometry * monoOdometry->slidingWindow[monoOdometry->tail]->R_bk_2_b0 * R_vi_2_odometry.transpose());
        q.setW(tt_q.w());
        q.setX(tt_q.x());
        q.setY(tt_q.y());
        q.setZ(tt_q.z());
        transform.setRotation(q);
        br.sendTransform(tf::StampedTransform(transform, ros::Time::now(), "world", "densebody"));
#endif

//        int preIndex = monoOdometry->tail - 1 ;
//        if ( preIndex < 0 ){
//            preIndex += slidingWindowSize ;
//        }
//        Vector3d tt_dist = (monoOdometry->slidingWindow[monoOdometry->tail]->T_bk_2_b0 -
//                monoOdometry->slidingWindow[preIndex]->T_bk_2_b0) ;
//        Matrix3d tt_rotate = monoOdometry->slidingWindow[monoOdometry->tail]->R_bk_2_b0.transpose() *
//                monoOdometry->slidingWindow[preIndex]->R_bk_2_b0 ;
//        Quaterniond tt_q(tt_rotate) ;

//        to_pub_info.x = monoOdometry->slidingWindow[monoOdometry->tail]->v_bk(0) ;
//        to_pub_info.y = monoOdometry->slidingWindow[monoOdometry->tail]->v_bk(1) ;
//        to_pub_info.z = monoOdometry->slidingWindow[monoOdometry->tail]->v_bk(2) ;
//        monoOdometry->pub_linear_velocity.publish(to_pub_info) ;


    }
}
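The "propagate the last frame info" block in both versions implements the usual IMU pre-integration update. Written out, with $g^{b_0}$ the gravity vector in the first body frame, $\Delta t$ the integrated time, and $\alpha_k$, $\beta_k$ the pre-integrated position/velocity increments (alpha_c_k and beta_c_k in the code):

\[
\begin{aligned}
T^{b_0}_{b_{k+1}} &= T^{b_0}_{b_k} - \tfrac{1}{2}\, g^{b_0}\, \Delta t^2 + R^{b_0}_{b_k}\bigl(v_{b_k}\,\Delta t + \alpha_k\bigr)\\
v_{b_{k+1}} &= \bigl(R^{k}_{k+1}\bigr)^{\top}\bigl(v_{b_k} - \bigl(R^{b_0}_{b_k}\bigr)^{\top} g^{b_0}\,\Delta t + \beta_k\bigr)\\
R^{b_0}_{b_{k+1}} &= R^{b_0}_{b_k}\, R^{k}_{k+1}
\end{aligned}
\]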
Example #17
/**
* @brief    Is the tracked shape really a face?
*           If both eyes and the mouth can be detected within
*           windows predicted from the shape (prior knowledge
*           about its layout), the shape is regarded as
*           correctly describing a face.
* @param    fd          - input     face detection algorithms
* @param    iImg        - input     input image
* @param    iShape      - input     the current tracked shape
* @param    iShapeInfo  - input     shape info
* @param    iFaceParts  - input     face parts
* @return   bool    whether the tracked shape is acceptable
*/
bool CRecognitionAlgs::EvaluateFaceTrackedByCascadeDetection(
    const CFaceDetectionAlgs* fd,
    const Mat& iImg,
    const VO_Shape& iShape,
    const vector<VO_Shape2DInfo>& iShapeInfo, 
    const VO_FaceParts& iFaceParts)
{
    double t = (double)cvGetTickCount();

    unsigned int ImgWidth       = iImg.cols;
    unsigned int ImgHeight      = iImg.rows;

    vector<unsigned int> leftEyePoints      = iFaceParts.VO_GetOneFacePart(VO_FacePart::LEFTEYE).GetIndexes();
    vector<unsigned int> rightEyePoints     = iFaceParts.VO_GetOneFacePart(VO_FacePart::RIGHTEYE).GetIndexes();
    vector<unsigned int> lipOuterLinerPoints= iFaceParts.VO_GetOneFacePart(VO_FacePart::LIPOUTERLINE).GetIndexes();

    VO_Shape leftEyeShape       = iShape.GetSubShape(leftEyePoints);
    VO_Shape rightEyeShape      = iShape.GetSubShape(rightEyePoints);
    VO_Shape lipOuterLinerShape = iShape.GetSubShape(lipOuterLinerPoints);

    float dolEye = 12.0f;
    float dolMouth = 12.0f;

    unsigned int possibleLeftEyeMinX    = 0.0f > (leftEyeShape.MinX() - dolEye) ? 0: (int)(leftEyeShape.MinX() - dolEye);
    unsigned int possibleLeftEyeMinY    = 0.0f > (leftEyeShape.MinY() - dolEye) ? 0: (int)(leftEyeShape.MinY() - dolEye);
    unsigned int possibleLeftEyeMaxX    = (leftEyeShape.MaxX() + dolEye) > ImgWidth ? ImgWidth : (int)(leftEyeShape.MaxX() + dolEye);
    unsigned int possibleLeftEyeMaxY    = (leftEyeShape.MaxY() + dolEye) > ImgHeight ? ImgHeight : (int)(leftEyeShape.MaxY() + dolEye);
    unsigned int possibleLeftEyeWidth   = possibleLeftEyeMaxX - possibleLeftEyeMinX;
    unsigned int possibleLeftEyeHeight  = possibleLeftEyeMaxY - possibleLeftEyeMinY;
    unsigned int possibleRightEyeMinX   = 0.0f > (rightEyeShape.MinX() - dolEye) ? 0: (int)(rightEyeShape.MinX() - dolEye);
    unsigned int possibleRightEyeMinY   = 0.0f > (rightEyeShape.MinY() - dolEye) ? 0: (int)(rightEyeShape.MinY() - dolEye);
    unsigned int possibleRightEyeMaxX   = (rightEyeShape.MaxX() + dolEye) > ImgWidth ? ImgWidth : (int)(rightEyeShape.MaxX() + dolEye);
    unsigned int possibleRightEyeMaxY   = (rightEyeShape.MaxY() + dolEye) > ImgHeight ? ImgHeight : (int)(rightEyeShape.MaxY() + dolEye);
    unsigned int possibleRightEyeWidth  = possibleRightEyeMaxX - possibleRightEyeMinX;
    unsigned int possibleRightEyeHeight = possibleRightEyeMaxY - possibleRightEyeMinY;
    unsigned int possibleMouthMinX      = 0.0f > (lipOuterLinerShape.MinX() - dolMouth) ? 0: (int)(lipOuterLinerShape.MinX() - dolMouth);
    unsigned int possibleMouthMinY      = 0.0f > (lipOuterLinerShape.MinY() - dolMouth) ? 0: (int)(lipOuterLinerShape.MinY() - dolMouth);
    unsigned int possibleMouthMaxX      = (lipOuterLinerShape.MaxX() + dolMouth) > ImgWidth ? ImgWidth : (int)(lipOuterLinerShape.MaxX() + dolMouth);
    unsigned int possibleMouthMaxY      = (lipOuterLinerShape.MaxY() + dolMouth) > ImgHeight ? ImgHeight : (int)(lipOuterLinerShape.MaxY() + dolMouth);
    unsigned int possibleMouthWidth     = possibleMouthMaxX - possibleMouthMinX;
    unsigned int possibleMouthHeight    = possibleMouthMaxY - possibleMouthMinY;

    Rect LeftEyePossibleWindow  = Rect( possibleLeftEyeMinX, possibleLeftEyeMinY, possibleLeftEyeWidth, possibleLeftEyeHeight );
    Rect RightEyePossibleWindow = Rect( possibleRightEyeMinX, possibleRightEyeMinY, possibleRightEyeWidth, possibleRightEyeHeight );
    Rect MouthPossibleWindow    = Rect( possibleMouthMinX, possibleMouthMinY, possibleMouthWidth, possibleMouthHeight );
    Rect CurrentWindow          = Rect( 0, 0, iImg.cols, iImg.rows );
    Rect DetectedLeftEyeWindow, DetectedRightEyeWindow, DetectedMouthWindow;

    bool LeftEyeDetected    = const_cast<CFaceDetectionAlgs*>(fd)->VO_FacePartDetection ( iImg, LeftEyePossibleWindow, DetectedLeftEyeWindow, VO_FacePart::LEFTEYE);
    bool RightEyeDetected   = const_cast<CFaceDetectionAlgs*>(fd)->VO_FacePartDetection ( iImg, RightEyePossibleWindow, DetectedRightEyeWindow, VO_FacePart::RIGHTEYE );
    bool MouthDetected      = const_cast<CFaceDetectionAlgs*>(fd)->VO_FacePartDetection ( iImg, MouthPossibleWindow, DetectedMouthWindow, VO_FacePart::LIPOUTERLINE );

    t = ((double)cvGetTickCount() - t)
        / (cvGetTickFrequency()*1000.0f);
    cout << "Detection confirmation time cost: " << t << " ms" << endl;

    if(LeftEyeDetected && RightEyeDetected && MouthDetected)
        return true;
    else
        return false;
}
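The three clamped search windows above repeat one pattern: expand a face part's bounding box by a margin and clip it to the image. A small helper (hypothetical, not part of the original class) could replace the eighteen inline clamping lines:

// Expand a sub-shape's bounding box by `margin` pixels and clamp to the image
// (hypothetical helper mirroring the inline clamping above).
static Rect possibleWindow( const VO_Shape& part, float margin,
                            unsigned int imgWidth, unsigned int imgHeight )
{
    unsigned int minX = ( part.MinX() - margin < 0.0f ) ? 0 : (unsigned int)(part.MinX() - margin);
    unsigned int minY = ( part.MinY() - margin < 0.0f ) ? 0 : (unsigned int)(part.MinY() - margin);
    unsigned int maxX = ( part.MaxX() + margin > imgWidth )  ? imgWidth  : (unsigned int)(part.MaxX() + margin);
    unsigned int maxY = ( part.MaxY() + margin > imgHeight ) ? imgHeight : (unsigned int)(part.MaxY() + margin);
    return Rect( minX, minY, maxX - minX, maxY - minY );
}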
Example #18
File: mylable.cpp  Project: xian0gang/test
void MyLable::set_scaled_button()
{
    int i, j, comp_count = 0;
    CvMat* color_tab = 0;
    CvSeq* contours = 0;

    cvSave("markes.xml",marker_mask);
    cvClearMemStorage(storage);
    cvFindContours( marker_mask, storage, &contours, sizeof(CvContour),
                    CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE );
    cvZero( markes );
    for( ; contours != 0; contours = contours->h_next, comp_count++ )
    {
        //cvDrawContours(markers, contours, cvScalarAll(comp_count+1),
        //    cvScalarAll(comp_count+1), -1, -1, 8, cvPoint(0,0) );
        cvDrawContours( markes, contours, cvScalarAll(comp_count+1),
                        cvScalarAll(comp_count+1), 1, -1, 8, cvPoint(0,0) );
    }

    color_tab = cvCreateMat( 1, comp_count, CV_8UC3 ); // create a random color table
    for( i = 0; i < comp_count; i++ )	// one distinct color per integer label
    {
        uchar* ptr = color_tab->data.ptr + i*3;
        ptr[0] = (uchar)(cvRandInt(&rng)%180 + 50);
        ptr[1] = (uchar)(cvRandInt(&rng)%180 + 50);
        ptr[2] = (uchar)(cvRandInt(&rng)%180 + 50);
    }

    double t = (double)cvGetTickCount();
    cvSave("img0.xml",markes);
    cvWatershed( img0, markes );
    cvSave("img1.xml",markes);
//                cvShowImage("imgo",img0);
    t = (double)cvGetTickCount() - t;
    printf( "exec time = %gms\n", t/(cvGetTickFrequency()*1000.) );

    /*********/
    for( i = 0; i < markes->height; i++ )
        for( j = 0; j < markes->width; j++ )
        {
            int idx = CV_IMAGE_ELEM( markes, int, i, j ); // markers are IPL_DEPTH_32S
            uchar* dst = &CV_IMAGE_ELEM( wshed, uchar, i, j*3 ); // BGR channels are interleaved, hence j*3
            uchar* t_body = &CV_IMAGE_ELEM( tongue_body, uchar, i, j*3 );
            uchar* src_pic = &CV_IMAGE_ELEM( img0, uchar, i, j*3 );
            if( idx == -1 ) // -1 in the output marks the boundaries between regions
            {
                //dst[0] = dst[1] = dst[2] = (uchar)128;
                dst[2] = (uchar)255;
            }
            else if( idx <= 0 || idx > comp_count )  //异常情况
            {
                //dst[0] = dst[1] = dst[2] = (uchar)0; // should not get here
            }
            else if( idx == 1 )  // region 1: green (tongue body)
            {
                //green      first
                dst[0] = (uchar)255;
                t_body[0] = src_pic[0];
                t_body[1] = src_pic[1];
                t_body[2] = src_pic[2];
            }
            else if( idx == 2 )  // region 2: blue
            {
                //blue        second
                dst[1] = (uchar)255;
            }
            else if( idx == 3 )  // region 3: red
            {
                //red         third
                dst[2] = (uchar)255;
            }
            else //正常情况
            {
                uchar* ptr = color_tab->data.ptr + (idx-1)*3;
                dst[0] = ptr[0]; dst[1] = ptr[1]; dst[2] = ptr[2];
            }
        }

    cvShowImage("img_gray",img_gray);
    cvShowImage("wshed",wshed);
    cvAddWeighted( wshed, 0.5, img_gray, 0.5, 0, wshed );
                // weighted blend: wshed = 0.5*wshed + 0.5*img_gray
                //cvShowImage("swhed",wshed);
                //cvShowImage("img_gray",img_gray);
                //cvShowImage( "watershed transform", wshed );
                //cvShowImage("img_final",img_final);
    cvShowImage( "watershed transform", wshed );

    img = Function::CjwIplToQImg(tongue_body);
    image = *img;

    cvReleaseMat( &color_tab );
    update();
    clear_list();
}
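At its core the routine above is the classic marker-based watershed: paint each seed contour with a distinct positive label in a 32-bit marker image, call cvWatershed on the 8-bit BGR source, and read the labels back (-1 marks region boundaries). A minimal self-contained sketch with the same C API:

#include <opencv/cv.h>

// Marker-based watershed (OpenCV C API sketch).
// img   : 8-bit, 3-channel input image
// seeds : linked list of seed contours, one label per component
IplImage* runWatershed( IplImage* img, CvSeq* seeds )
{
    IplImage* markers = cvCreateImage( cvGetSize(img), IPL_DEPTH_32S, 1 );
    cvZero( markers );

    int label = 0;
    for( CvSeq* c = seeds; c != 0; c = c->h_next, label++ )
        cvDrawContours( markers, c, cvScalarAll(label+1),
                        cvScalarAll(label+1), -1, CV_FILLED, 8 );

    cvWatershed( img, markers );  // on return: labels 1..label, -1 at boundaries
    return markers;               // caller releases with cvReleaseImage
}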
// MATLAB entry point
void mexFunction(int nlhs, mxArray *plhs[], int nrhs, const mxArray *prhs[])
{
    if (nrhs < 2 || nrhs > 3)
    {
        mexErrMsgTxt("Improper number of input arguments.\n\n"
                "FLANDMARK_DETECTOR_BASE detects facial landmarks.\n"
                "Synopsis: \n"
                "flandmark_detector(face_image, fname)\n"
                "\n"
                "Input: \n"
                "  face_image [im_H * im_W (uint8)] column vector \n"
                "  model [(uint8)] data from flandmark_model.dat \n"
                "Output: \n"
                "  landmarks [2 x M] matrix (DOUBLE) x, y coordinates of landmarks in original image\n"
                "\n"
             );
    }
    
    if (!mxIsUint8(prhs[0]))
    {
        mexErrMsgTxt("Improper argument type \"face_image\"\n\n"
                "  face image must be [im_H * im_W] uint8 matrix.\n"
              );
    }
    
    if (!mxIsUint8(prhs[1]))
    {
        mexErrMsgTxt("Improper argument type \"model\"\n\n"
                "  model must be a uint8 array loaded by flandmark_load_model.\n"
              );
    }

    uint8_t * face_image = (uint8_t*)mxGetPr(prhs[0]);

    double start, ms;

    /* check input */
    if (face_image == NULL)
    {
        mexErrMsgTxt("Improper argument type \"face_image\"\n\n"
                "  face image must be [im_H x im_W] uint8 matrix.\n"
              );
    }

    FLANDMARK_Model * model = (FLANDMARK_Model*)mxGetPr(prhs[1]);

    const mwSize *mx_dim = mxGetDimensions(prhs[0]);
    if (mx_dim[0]*mx_dim[1] != model->data.options.bw[0]*model->data.options.bw[1])
    {
        mexErrMsgTxt("Improper argument type \"face_image\"\n\n"
                     "   face_image size must match the model's base window [bw_H x bw_W].\n"
            );
    }

    start = (double)cvGetTickCount();

    plhs[0] = mxCreateNumericMatrix(2, model->data.options.M, mxDOUBLE_CLASS, mxREAL);
    double *landmarks = (double*)mxGetPr(plhs[0]);
    flandmark_detect_base(face_image, model, landmarks);

    start = (double)cvGetTickCount() - start;
    ms = start / ((double)cvGetTickFrequency() * 1000.0);

    mexPrintf("Landmarks positions estimated in %.6f ms\n", ms);
}
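Since plhs[0] is created with mxCreateNumericMatrix(2, M, ...), MATLAB stores it column-major, so the C buffer interleaves coordinates as x0, y0, x1, y1, .... Continuing the function above, the result could be dumped like this (a sketch):

// Landmarks live in a 2 x M column-major buffer:
// landmarks[2*i] = x_i, landmarks[2*i + 1] = y_i.
for (int i = 0; i < model->data.options.M; ++i)
    mexPrintf("landmark %d: (%.1f, %.1f)\n", i, landmarks[2*i], landmarks[2*i + 1]);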
Example #20
static
int build_svm_classifier( char* data_filename, const char* filename_to_save, const char* filename_to_load )
{
    CvMat* data = 0;
    CvMat* responses = 0;
    CvMat* train_resp = 0;
    CvMat train_data;
    int nsamples_all = 0, ntrain_samples = 0;
    int var_count;
    CvSVM svm;

    int ok = read_num_class_data( data_filename, 16, &data, &responses );
    if( !ok )
    {
        printf( "Could not read the database %s\n", data_filename );
        return -1;
    }
    ////////// SVM parameters ///////////////////////////////
    CvSVMParams param;
    param.kernel_type=CvSVM::LINEAR;
    param.svm_type=CvSVM::C_SVC;
    param.C=1;
    ///////////////////////////////////////////////////////////

    printf( "The database %s is loaded.\n", data_filename );
    nsamples_all = data->rows;
    ntrain_samples = (int)(nsamples_all*0.1);
    var_count = data->cols;

    // Create or load the SVM classifier
    if( filename_to_load )
    {
        // load classifier from the specified file
        svm.load( filename_to_load );
        ntrain_samples = 0;
        if( svm.get_var_count() == 0 )
        {
            printf( "Could not read the classifier %s\n", filename_to_load );
            return -1;
        }
        printf( "The classifier %s is loaded.\n", filename_to_load );
    }
    else
    {
        // train classifier
        printf( "Training the classifier (may take a few minutes)...\n");
        cvGetRows( data, &train_data, 0, ntrain_samples );
        train_resp = cvCreateMat( ntrain_samples, 1, CV_32FC1);
        for (int i = 0; i < ntrain_samples; i++)
            train_resp->data.fl[i] = responses->data.fl[i];
        svm.train(&train_data, train_resp, 0, 0, param);
    }

    // classification
    std::vector<float> _sample(var_count * (nsamples_all - ntrain_samples));
    CvMat sample = cvMat( nsamples_all - ntrain_samples, var_count, CV_32FC1, &_sample[0] );
    std::vector<float> true_results(nsamples_all - ntrain_samples);
    for (int j = ntrain_samples; j < nsamples_all; j++)
    {
        float *s = data->data.fl + j * var_count;

        for (int i = 0; i < var_count; i++)
        {
            sample.data.fl[(j - ntrain_samples) * var_count + i] = s[i];
        }
        true_results[j - ntrain_samples] = responses->data.fl[j];
    }
    CvMat *result = cvCreateMat(1, nsamples_all - ntrain_samples, CV_32FC1);

    printf("Classification (may take a few minutes)...\n");
    double t = (double)cvGetTickCount();
    svm.predict(&sample, result);
    t = (double)cvGetTickCount() - t;
    printf("Prediction type: %gms\n", t/(cvGetTickFrequency()*1000.));

    int true_resp = 0;
    for (int i = 0; i < nsamples_all - ntrain_samples; i++)
    {
        if (result->data.fl[i] == true_results[i])
            true_resp++;
    }

    printf("true_resp = %f%%\n", (float)true_resp / (nsamples_all - ntrain_samples) * 100);

    if( filename_to_save )
        svm.save( filename_to_save );

    cvReleaseMat( &train_resp );
    cvReleaseMat( &result );
    cvReleaseMat( &data );
    cvReleaseMat( &responses );

    return 0;
}
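A hypothetical driver for build_svm_classifier (the argument layout and the default dataset name are illustrative, not taken from the original sample):

int main( int argc, char** argv )
{
    // data file in the 16-feature comma-separated format expected by read_num_class_data
    char* data_filename          = argc > 1 ? argv[1] : (char*)"letter-recognition.data";
    const char* filename_to_save = argc > 2 ? argv[2] : 0;  // optional: where to save the model
    const char* filename_to_load = argc > 3 ? argv[3] : 0;  // optional: load instead of training
    return build_svm_classifier( data_filename, filename_to_save, filename_to_load );
}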
Example #21
File: main.cpp  Project: wavs/pfe-2011-scia
void DetectAndDraw(IplImage* frame, HandyServer& server)
{
  double t = (double)cvGetTickCount();

  IplImage* img_nvg = cvCreateImage(cvGetSize(frame), frame->depth, 1);
  IplImage* img_imp = cvCloneImage(img_nvg);

  // Convert to grayscale, then binarize (inverted threshold)
  cvConvertImage(frame, img_nvg, (frame->origin!=IPL_ORIGIN_TL) ? CV_CVTIMG_FLIP : 0);
  cvThreshold(img_nvg, img_imp, SEUIL_BINAIRE, 255, CV_THRESH_BINARY_INV);

  // Dilation then erosion (morphological closing)
  IplConvKernel* noyau = cvCreateStructuringElementEx(DILATE_RADIUS,
                                                      DILATE_RADIUS,
                                                      DILATE_RADIUS/2,
                                                      DILATE_RADIUS/2,
                                                      CV_SHAPE_ELLIPSE);
  cvDilate(img_imp, img_imp, noyau);
  cvErode(img_imp,  img_imp, noyau);

  // Contour detection
  CvMemStorage* storage = cvCreateMemStorage(0);
  CvSeq* contour = NULL;
  cvFindContours(img_imp, storage, &contour, sizeof(CvContour));

  // Compute the center of each finger blob and send it to the server
  std::vector<Pos2d> positions;
  for(;contour != 0; contour = contour->h_next)
  {
    int max_x = -1, max_y = -1, min_x = MAX_XY, min_y = MAX_XY;
    for(int i = 0; i < contour->total; ++i)
    {
      CvPoint* p = CV_GET_SEQ_ELEM(CvPoint, contour, i);
      if (p->x > max_x) max_x = p->x;
      if (p->y > max_y) max_y = p->y;
      if (p->x < min_x) min_x = p->x;
      if (p->y < min_y) min_y = p->y;
    }

    CvPoint center;
    Pos2d pos((max_x + min_x) / 2, (max_y + min_y) / 2);
    center.x = pos.x;
    center.y = pos.y;
    positions.push_back(pos);

    int szx = max_x - min_x, szy = max_y - min_y;
    if (szx > MIN_SZ && szy > MIN_SZ &&
        szx < MAX_SZ && szy < MAX_SZ &&
        szx / (float)szy < MAX_RATIO &&
        szy / (float) szx < MAX_RATIO)
      cvCircle(frame, center, 16, CV_RGB(255,0,0));
  }
  server.sendPos(positions);

  // Free memory
  cvReleaseMemStorage(&storage);
  cvReleaseImage(&img_nvg);
  cvReleaseImage(&img_imp);

  // Compute elapsed time and display the image
  t = (double)cvGetTickCount() - t;
  std::cout << "detection time = " << t/((double)cvGetTickFrequency()*1000.) << "ms" << std::endl;

  cvShowImage("result", frame);
}
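Incidentally, the manual min/max scan over contour points recomputes what cvBoundingRect already provides; inside the loop the same values could be obtained with (equivalent sketch):

// cvBoundingRect returns the tight axis-aligned bounding box of the contour.
CvRect box = cvBoundingRect( contour, 0 );
CvPoint center = cvPoint( box.x + box.width/2, box.y + box.height/2 );
int szx = box.width, szy = box.height;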
int testfaceLib_sThread ( const char* str_video, int  trackerType, int multiviewType, int recognizerType, const char* str_facesetxml, int threads, 
						 bool blink, bool smile, bool gender, bool age, bool recog, bool quiet, bool saveface, const char* sfolder, bool bEnableAutoCluster)
{
	int  faceimgID = 0;
	char driver[8];
	char dir[1024];
	char fname[1024];
	char ext[8];
	char sImgPath[1024];

	if(sfolder)
	{
		char sysCommand[128];
		sprintf (sysCommand, "mkdir %s", sfolder);
		system (sysCommand);

		sprintf(sImgPath, "%s//%s", sfolder,  "imaginfo.txt");
		sprintf(fname,   "%s//%s", sfolder,  "faceinfo.txt");
	}
	else
	{
		sprintf(sImgPath, "%s", "imaginfo.txt");
		sprintf(fname,   "%s", "faceinfo.txt");
	}

	FILE* fp_imaginfo = fopen( sImgPath, "wt" );
    FILE* fp_faceinfo = fopen( fname, "wt" );

    bool bAutoFocus = false;
	IplImage *imgAutoFocus = NULL;

	/////////////////////////////////////////////////////////////////////////////////////
	//	init GUI window
	const char* str_title = "Face Tester";
	if( ! quiet )
		cvNamedWindow( str_title, CV_WINDOW_AUTOSIZE );

	char sCaptionInfo[256]="";
	CvFont *pFont = new CvFont;
	cvInitFont(pFont, CV_FONT_HERSHEY_PLAIN, 0.85, 0.85, 0, 1);
	
	// load GUI smile icon images
	IplImage *pImgSmileBGR;
	IplImage *pImgSmileMask;
	if(age == 0)
	{
		pImgSmileBGR  = cvLoadImage( "smile.bmp" );
		pImgSmileMask = cvLoadImage( "smilemask.bmp", 0 );
	}
	else
	{
		pImgSmileBGR  = cvLoadImage( "faceicon.bmp" );
		pImgSmileMask = cvLoadImage( "faceiconMask.bmp", 0 );
	}
	IplImage *pImgSmileBGRA = cvCreateImage( cvSize(pImgSmileBGR->width, pImgSmileBGR->height), IPL_DEPTH_8U, 4 );
	cvCvtColor(pImgSmileBGR, pImgSmileBGRA, CV_BGR2BGRA );

	// open video source
    size_t len = strlen( str_video );
    bool is_piclist = (0 == stricmp( str_video + len - 4, ".txt" ));
    CxImageSeqReader* vidcap = NULL;
    if( is_piclist )
        vidcap = new CxPicListReader( str_video );
    else
        vidcap = new CxVideoReader( str_video );

	if( cvGetErrStatus() < 0 )
	{   
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// when using camera, set to 640x480, 30fps
	if( isdigit(str_video[0]) != 0 && str_video[1] == '\0' )
	{
		vidcap->width( 640 );
		vidcap->height( 480 );
		vidcap->fps( 30 );
	}

	// print beginning info
	printf( "tracker cascade:  '%s'\n", trackerType == TRA_HAAR ? "haar" : (trackerType== TRA_SURF ? "surf" : "pf tracker SURF"));
	printf( "face recognizer:  '%s'\n", recognizerType == RECOGNIZER_BOOST_GB240 ? "boost gabor240" : "cascade gloh"  );
	printf( "video:    '%s', %dx%d, %2.1f fps\n", str_video, 
		vidcap->width(), vidcap->height(), vidcap->fps() );

	// config face tracker
	const int  face_max = 16;
	CvRectItem rects[face_max];
	
	tagDetectConfig configParam;
	EnumViewAngle  viewAngle = (EnumViewAngle)multiviewType;

	CxlibFaceDetector detector;
	detector.init(viewAngle, (EnumFeaType)trackerType);
	detector.config( configParam );

	CxlibFaceTracker tracker;
	tracker.init(viewAngle, (EnumTrackerType)trackerType);
	tracker.config( configParam, TR_NLEVEL_3 );

	if( cvGetErrStatus() < 0 )
	{
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// config landmark detector
	CvPoint2D32f   landmark6[6+1]; // consider both 6-pt and 7-pt
	float          parameters[16];
	bool      bLandmark = false;
	CxlibLandmarkDetector landmarkDetector(LDM_6PT);

	int size_smallface = 64;
	int size_bigface   = 128;
	CxlibAlignFace cutFace(size_smallface, size_bigface);
	
	// config blink/smile/gender detector
	int    bBlink = 0, bSmile = 0, bGender = 0, bAge = 0;  //+1, -1, otherwise 0: no process 
	float  probBlink = 0, probSmile = 0, probGender = 0, probAge[4];
	int    nAgeID = 0;

	CxlibBlinkDetector  blinkDetector(size_smallface);
	CxlibSmileDetector  smileDetector(size_smallface);
	CxlibGenderDetector genderDetector(size_smallface);
	CxlibAgeDetector    ageDetector(size_bigface);

	// config face recognizer
	float probFaceID = 0;
	if(str_facesetxml == NULL)
		str_facesetxml = "faceset_model.xml";

	CxlibFaceRecognizer faceRecognizer( size_bigface, recognizerType );
	if(recog) faceRecognizer.loadFaceModelXML(str_facesetxml);
	
	// set mouse event process
	CxMouseParam mouse_faceparam;
	mouse_faceparam.updated = false;
	mouse_faceparam.play = true;
	mouse_faceparam.ret_online_collecting = 0;
		
	if(! quiet)
	{
		mouse_faceparam.face_num  = face_max;
		mouse_faceparam.rects     = rects;
		mouse_faceparam.image     = NULL;
		mouse_faceparam.cut_big_face= cutFace.getBigCutFace();
		mouse_faceparam.typeRecognizer = 1;
		mouse_faceparam.faceRecognizer = &faceRecognizer;
		cvSetMouseCallback(	str_title, my_mouse_callback, (void*)&mouse_faceparam );
	}

	// init count ticks                   
	int64  ticks, start_ticks, total_ticks;
	int64  tracker_total_ticks, landmark_total_ticks, align_total_ticks,
		   blink_total_ticks, smile_total_ticks, gender_total_ticks, age_total_ticks, recg_total_ticks;
	double frame_fps, tracker_fps, landmark_fps, align_fps, blink_fps, smile_fps, gender_fps, age_fps, recg_fps, total_fps; 

	start_ticks         = total_ticks          = 0;
	tracker_total_ticks = landmark_total_ticks = align_total_ticks  = 0;
	blink_total_ticks   = smile_total_ticks    = gender_total_ticks = age_total_ticks = recg_total_ticks = 0;

	tracker_fps = landmark_fps = align_fps = blink_fps = smile_fps = gender_fps = age_fps = recg_fps = total_fps = 0.0;        

	// loop for each frame of a video/camera
	int frames = 0;
	IplImage *pImg = NULL;
	int   print_faceid=-1;
	float print_score = 0;
	std::string  print_facename;

	bool bRunLandmark = blink || smile|| gender|| age|| recog || saveface;
	IplImage *thumbnailImg   = cvCreateImage(cvSize(THUMBNAIL_WIDTH, THUMBNAIL_HEIGHT), IPL_DEPTH_8U, 3);   
	
	//dynamic clustering for smooth ID registration
	//bEnableAutoCluster = true;
	if( is_piclist ) bEnableAutoCluster = false;

	while( ! vidcap->eof() )
	{   
		// capture a video frame
		if( mouse_faceparam.play == true)
			pImg = vidcap->query();
		else 
			continue;

		if ( pImg == NULL )
			continue;

		// make a copy, flip if upside-down
		CvImage image( cvGetSize(pImg), pImg->depth, pImg->nChannels );
		if( pImg->origin == IPL_ORIGIN_BL ) //flip live camera's frame
			cvFlip( pImg, image );
		else
			cvCopy( pImg, image );

		// convert to gray_image for face analysis
		CvImage gray_image( image.size(), image.depth(), 1 );
		if( image.channels() == 3 )
			cvCvtColor( image, gray_image, CV_BGR2GRAY );
		else
			cvCopy( image, gray_image );

		// do face tracking
		start_ticks = ticks = cvGetTickCount();	
       
		int face_num = 0;
        if( is_piclist )
            face_num = detector.detect( gray_image, rects, face_max );
        else
            face_num = tracker.track( gray_image, rects, face_max, image ); // track in a video for faster speed
		  //face_num = tracker.detect( gray_image, rects, face_max ); // detect in an image

		//set param for mouse event processing
		if(!quiet)
		{
			mouse_faceparam.face_num = face_num;
			mouse_faceparam.image    = image;
		}

		ticks       = cvGetTickCount() - ticks;
		tracker_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
		tracker_total_ticks += ticks;

        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "%s  %d", vidcap->filename(), face_num );

        // blink/smile/gender/age/face recognize section
		for( int i=0; i<face_num; i++ )
		//for( int i=0; i< MIN(1,face_num); i++ )
		{
			// get face rect and id from face tracker
			CvRect rect = rects[i].rc;

            if( fp_imaginfo != NULL )
                fprintf( fp_imaginfo, "  %d %d %d %d %f", rect.x, rect.y, rect.width, rect.height, rects[i].prob );

			int    face_trackid = rects[i].fid;
			float  like = rects[i].prob;
			int    angle= rects[i].angle;

			// filter out outer faces
			if( rect.x+rect.width  > gray_image.width()   || rect.x < 0 ) continue;
			if( rect.y+rect.height > gray_image.height() || rect.y < 0 ) continue;

			//tracker.getThumbnail(image, rect, thumbnailImg);

			// detect landmark points 
			ticks = cvGetTickCount();	

			if(bRunLandmark)
			{
                if( is_piclist )
				    bLandmark = landmarkDetector.detect( gray_image, &rect, landmark6, parameters, angle ); //detect in an image
                else
				    bLandmark = landmarkDetector.track( gray_image, &rect, landmark6, parameters, angle ); // track in a video for faster speed

				ticks = cvGetTickCount() - ticks;
				landmark_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
				landmark_total_ticks += ticks;
			}
			else
				bLandmark = false;

	
			if(quiet == false && bLandmark == false) 
			{
				//DrawFaceRect
				cxlibDrawFaceRect(image, rect);
				continue;
			}

			// warped align face and hist eq to delighting
			ticks = cvGetTickCount();	

			cutFace.init(gray_image, rect, landmark6);

			ticks = cvGetTickCount() - ticks;
			if(ticks > 1)
				align_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
			else
			{
				align_fps = 0;
				ticks = 0;
			}
			align_total_ticks += ticks;

			if(saveface)   //save face icon for training later
			{
				//save cutfaces
				if(sfolder)
				{
#ifdef WIN32
					_splitpath(vidcap->filename(),driver,dir,fname,ext);
					sprintf(sImgPath, "%s//%s%s", sfolder, fname,ext);
#else
					sprintf(sImgPath, "%s//%06d.jpg", sfolder, faceimgID++);
#endif
				}
				else
					sprintf(sImgPath, "%s#.jpg", vidcap->filename());
				
				cvSaveImage(sImgPath, cutFace.getBigCutFace());
			}

			// detect blink
			bBlink = 0;	
			probBlink = 0;
			if(blink && bLandmark)
			{
				ticks = cvGetTickCount();	
				float blink_threshold = blinkDetector.getDefThreshold();//0.5;
				int ret = blinkDetector.predict( &cutFace, &probBlink);
			
				if(probBlink > blink_threshold )
					bBlink = 1;  //eye close
				else 
					bBlink = -1; //eye open

				ticks = cvGetTickCount() - ticks;
				blink_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				blink_total_ticks += ticks;

				print_score = probBlink;
			}
			else blink_fps = 0;

			// detect smile
			bSmile    = 0;	
			probSmile = 0;
			if ( smile && bLandmark )
			{	
				ticks = cvGetTickCount();
				float smile_threshold = smileDetector.getDefThreshold(); //0.48;  
				int ret = smileDetector.predict(&cutFace, &probSmile);

				if(probSmile > smile_threshold)
					bSmile = 1;  //smile
				else 
					bSmile = -1; //not smile

				ticks	  = cvGetTickCount() - ticks;
				smile_fps = 1000.0 /( 1e-3 * ticks / cvGetTickFrequency() );
				smile_total_ticks += ticks;

				print_score = probSmile;
			}
			else smile_fps = 0;

			//detect gender
			bGender    = 0;	
			probGender = 0;
			if(gender && bLandmark)
			{
				ticks = cvGetTickCount();	
				float gender_threshold = genderDetector.getDefThreshold(); // 0.42; 
				int ret = genderDetector.predict(&cutFace, &probGender);

				if(probGender > gender_threshold)
					bGender =  1; //female
				else
					bGender = -1; //male

				//bGender = -1:male, 1:female, 0: null
				// smooth prediction result
                if( ! is_piclist )
				    bGender = genderDetector.voteLabel(face_trackid, bGender);
				
				ticks = cvGetTickCount() - ticks;
				gender_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				gender_total_ticks += ticks;

				print_score = probGender; 
			}
			else gender_fps = 0;

			//detect age
			nAgeID  = -1;
			if(age && bLandmark && rect.width*rect.height > 40*40)
			{
				ticks = cvGetTickCount();	

				//nAgeID = 0:"Baby", 1:"Kid", 2:"Adult", 3:"Senior"
				nAgeID = ageDetector.predict(&cutFace, probAge);

				// smooth prediction result
                if( ! is_piclist )
				    nAgeID = ageDetector.voteLabel(face_trackid, nAgeID); 

				ticks = cvGetTickCount() - ticks;
				age_fps = 1000.0/(1e-3*ticks/cvGetTickFrequency());
				age_total_ticks += ticks;

				print_score = probAge[nAgeID]; 
				//if( ! quiet )	cxDrawAignFace2Image(image, pCutFace2);
			}
			else 
			{
				age_fps = 0;
			}

			// recognize the face id
			// (recognition is only run once every 5 frames, at a sampling interval)
			char  *sFaceCaption = NULL;
			char  sFaceCaptionBuff[256];
            int face_id = 0;
			probFaceID = 0;
			if ( recog && bLandmark )
			{
				ticks = cvGetTickCount();
				float face_threshold = faceRecognizer.getDefThreshold(); 
				/////////////////////////////////////////////////////////////////////////////////////////
				face_id  = -1;
				if(bEnableAutoCluster && !is_piclist)
				{
					bool bAutocluster = true;
					if(mouse_faceparam.ret_online_collecting) bAutocluster = false;
					//face clustering
					face_id  = faceRecognizer.predict(&cutFace, &probFaceID, bAutocluster, face_trackid, frames);
				}
				else//face recognition
					face_id  = faceRecognizer.predict(&cutFace, &probFaceID);
				/////////////////////////////////////////////////////////////////////////////////////////

				ticks    = cvGetTickCount() - ticks;
				recg_fps = 1000.0f / ( 1e-3 * ticks / cvGetTickFrequency() );
				recg_total_ticks += ticks;
				
				// smooth prediction result
                if( ! is_piclist && !bEnableAutoCluster)
                {
				    if(probFaceID > face_threshold*1.0)
					    face_id = faceRecognizer.voteLabel(face_trackid, face_id); 
				    else
					    face_id = faceRecognizer.voteLabel(face_trackid, -1);
                }
				else if(probFaceID <= face_threshold)
				{
					face_id =-1;
				}

				//set face name caption
				if(face_id >= 0)
				{
					// recognized face name
					const char* sFaceName = faceRecognizer.getFaceName(face_id);
					sprintf(sFaceCaptionBuff, "%s %.2f", sFaceName, probFaceID);
					//sprintf(sFaceCaptionBuff, "%s", sFaceName); //dispaly score
					sFaceCaption = sFaceCaptionBuff;
					
					print_score  = probFaceID;
					print_faceid = face_id;
				}
				else
				{   // failed to recognize 
					//sprintf(sFaceCaptionBuff, "N\A %.2f", probFaceID);
					//sFaceCaption = sFaceCaptionBuff;
				}

				// collect and save unknown face exemplars
				if(probFaceID < face_threshold*0.9 || face_id != mouse_faceparam.ret_faceset_id )
				{
					if(mouse_faceparam.ret_online_collecting && (face_num ==1 || face_trackid == mouse_faceparam.ret_facetrack_id))
					{
						if( rect.x > gray_image.width()/4 && rect.x+rect.width < gray_image.width()*3/4 ) 
						{
							mouse_faceparam.updated = true;
							int nFaceSetIdx = faceRecognizer.getFaceSetIdx(mouse_faceparam.ret_faceset_id);
							bool bflag = faceRecognizer.tryInsertFace(cutFace.getBigCutFace(), nFaceSetIdx);
							//printf("insert flag %d\n", bflag);
						}
					}
				}
			}
			else recg_fps = 0;

			if( ! quiet )
			{
				sprintf(sCaptionInfo, "FPS: %03d Fd:%04d Ld:%04d Fa:%04d Bl:%04d Sm:%04d Ge:%04d Ag:%03d Rc:%03d",
					(int)frame_fps, (int)tracker_fps, (int)landmark_fps, (int)align_fps, 
					(int)blink_fps,   (int)smile_fps,    (int)gender_fps, (int)age_fps, (int)recg_fps);

				//sprintf(sFaceCaptionBuff, "%.2f", print_score);
				//sFaceCaption = sFaceCaptionBuff;

				int trackid = -1; //face_trackid. don't display trackid if -1
				cxlibDrawFaceBlob( image, pFont, trackid, rect, landmark6, probSmile, 
					bBlink, bSmile, bGender, nAgeID, sFaceCaption, NULL,
					pImgSmileBGR, pImgSmileBGRA, pImgSmileMask);
			}

            // log file
            if( fp_faceinfo != NULL )
            {
                // index,  rect,  landmark6,  bBlink, probBlink, bSmile, probSmile, bGender, probGender, nAgeID, probAge[nAgeID], face_id, probFaceID
				//fprintf( fp_faceinfo, "#%s# @%s@ ",    vidcap->filename(), sImgPath);
				fprintf( fp_faceinfo, "#%s# ",    vidcap->filename());
                fprintf( fp_faceinfo, "faceidx=( %06d %02d )", vidcap->index(), i+1 );
				fprintf( fp_faceinfo, "   rect=( %3d %3d %3d %3d )", rect.x, rect.y, rect.width, rect.height );
                fprintf( fp_faceinfo, "   landmark6=(" );
                int l;
                for( l = 0; l < 6; l++ )
                    fprintf( fp_faceinfo, " %3.0f %3.0f", landmark6[l].x, landmark6[l].y );
                fprintf( fp_faceinfo, " )");
                fprintf( fp_faceinfo, "   blink=( %+d %f )", bBlink, probBlink );
                fprintf( fp_faceinfo, "   smile=( %+d %f )", bSmile, probSmile );
                fprintf( fp_faceinfo, "   gender=( %+d %f )", bGender, probGender );
                fprintf( fp_faceinfo, "   agegroup=( %+d %f )", nAgeID, (nAgeID >= 0 && nAgeID < 4) ? probAge[nAgeID] : 1.0f );
                fprintf( fp_faceinfo, "   identity=( %+d %f )", face_id, probFaceID );
                fprintf( fp_faceinfo, "\n" );
            }
        }
        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "\n" );

		ticks    = cvGetTickCount() - start_ticks;
		total_ticks += (ticks);
		frame_fps = 1000.0f / ( 1e-3 * ticks / cvGetTickFrequency() );

		// frame face_num
		frames++;

		//auto focus faces
		if(quiet == false && bAutoFocus)
		{
			if(imgAutoFocus)
				cvCopy(image, imgAutoFocus);
			else
				imgAutoFocus = cvCloneImage(image);
			cxlibAutoFocusFaceImage(imgAutoFocus, image, rects, face_num);
		}

		// next frame if quiet
		if( quiet )
			continue;
		else
		{
			// draw status info for custom interaction
			if(mouse_faceparam.ret_online_collecting == 1)
			{
				sprintf(sCaptionInfo, "Collecting faces for track_id = %d", mouse_faceparam.ret_facetrack_id);
				//draw face collecting region
				cvLine(image, cvPoint(image.width()/4, 0), cvPoint(image.width()/4, image.height()-1), CV_RGB(255,255,0), 2);
				cvLine(image, cvPoint(image.width()*3/4, 0), cvPoint(image.width()*3/4, image.height()-1), CV_RGB(255,255,0), 2);
			}
			else
				sprintf(sCaptionInfo, "FPS: %03d Fd:%04d Ld:%04d Fa:%04d Bl:%04d Sm:%04d Ge:%04d Ag:%03d Rc:%03d",
					(int)frame_fps, (int)tracker_fps, (int)landmark_fps, (int)align_fps, 
					(int)blink_fps,   (int)smile_fps,    (int)gender_fps, (int)age_fps, (int)recg_fps);

			cxlibDrawCaption( image, pFont, sCaptionInfo);
		}
	
		//show Image
		if (image.width() <= 800)
		{
			//show image
			cvShowImage( str_title, image );
		}
		else
		{   // show scaled smaller image
			CvImage scale_image (cvSize(800, image.height()*800/image.width()), image.depth(), 3 );
			cvResize (image, scale_image);
			cvShowImage( str_title, scale_image );
		}

		// user interaction
		int key = cvWaitKey( 30 );
		//int key = cvWaitKey( );
		if( key == ' ' ) // press the spacebar to pause the video play 
			cvWaitKey( 0 );                           
		else if( key == 27 )
			break;	    // press 'esc' to exit
		else if( key == 'a' )
		{  // add new face name
			if(face_num > 0)
			{   
				CvRect rect = rects[0].rc;
				int x = rect.x+rect.width/2;
				int y = rect.y+rect.height/2;
				addFaceSet( x, y, &mouse_faceparam);
			}
		}
		else if( key == 'c' )
		{   // collect face exemplars for current selected facename
			mouse_faceparam.ret_online_collecting = 1; //enable online face exemplar collecting
		}
		else if( key == 'z' )
			// turn on/off the autofocus flag
			bAutoFocus = !bAutoFocus;
		else if(key >= 0)
		{
			if(mouse_faceparam.ret_online_collecting == 1)
			{   // stop collecting faces
				mouse_faceparam.ret_online_collecting = 0; //disable online face exemplar collecting
				mouse_faceparam.ret_facetrack_id = -1;
			}

			if( key == 's')
			{   // save face models
				faceRecognizer.saveFaceModelXML("faceset_model.xml");
				sprintf(sCaptionInfo, "%s", "saved the face model");
				cxlibDrawCaption( pImg, pFont, sCaptionInfo);
				cvShowImage( str_title, pImg );
				cvWaitKey( 400 ); 
			}
		}
	}

	// print speed info about fps
	float temp    = 1e-6f / cvGetTickFrequency();
	tracker_fps   = 1.0f  / ( tracker_total_ticks * temp / frames );

	if (landmark_total_ticks != 0.0)
		landmark_fps = 1.0f  / ( landmark_total_ticks * temp / frames );

	if (align_total_ticks != 0.0)
		align_fps    = 1.0f  / ( align_total_ticks * temp / frames );

	if (blink_total_ticks != 0.0)
		blink_fps  = 1.0f  / (blink_total_ticks * temp / frames);

	if (smile_total_ticks != 0.0)
		smile_fps  = 1.0f  / (smile_total_ticks * temp / frames);

	if (gender_total_ticks != 0.0)
		gender_fps = 1.0f  / (gender_total_ticks * temp / frames);

	if (age_total_ticks != 0.0)
		age_fps = 1.0f  / (age_total_ticks * temp / frames);

	if (recg_total_ticks != 0.0)
		recg_fps   = 1.0f  / (recg_total_ticks  * temp / frames);

	total_fps = 1.0f / (total_ticks * temp / frames);

	printf( "Total frames:%d  Speed:%.1f fps\n", frames, total_fps);
	printf( "FPS: Fd:%.1f Ld:%.1f Fa:%.1f Bl:%.1f Sm:%.1f Ge:%.1f Ag:%.1f Rc:%.1f",
		tracker_fps, landmark_fps, align_fps, 
		blink_fps,   smile_fps,    gender_fps, age_fps, recg_fps);

	//save updated face model
	if(mouse_faceparam.updated == true)
	{
		sprintf(sCaptionInfo, "%s", "press key 's' to save updated face model or other keys to cancel");
		cxlibDrawCaption( pImg, pFont, sCaptionInfo);
		cvShowImage( str_title, pImg );

		int key = cvWaitKey();
		if( key == 's')
			faceRecognizer.saveFaceModelXML("faceset_model.xml");
	}

	
	//save merged face model for dynamic clustering of smoothID
	vFaceSet vMergedFaceSet;
	int minWeight = 10;
	faceRecognizer.getMergedFaceSet(vMergedFaceSet, minWeight);
	faceRecognizer.saveFaceModelXML("faceset_modelMerged.xml", &vMergedFaceSet);
	//faceRecognizer.saveFaceModelXML("faceset_modelMerged#.xml");

	//release buff 
	
	//release global GUI data
	if( !quiet )
		cvDestroyWindow( str_title );

	cvReleaseImage(&thumbnailImg);
	cvReleaseImage(&pImgSmileBGR);
	cvReleaseImage(&pImgSmileBGRA);
	cvReleaseImage(&pImgSmileMask);
	
	delete pFont;

    delete vidcap;

    if( fp_imaginfo != NULL )
        fclose( fp_imaginfo );
	
    if( fp_faceinfo != NULL )
        fclose( fp_faceinfo );

    return 0;
}
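The voteLabel calls above smooth noisy per-frame predictions along a face track; the library implementation is not shown here. A minimal majority-vote smoother over a short label history (purely illustrative, not the actual CxlibFaceRecognizer::voteLabel):

#include <deque>
#include <map>

// Majority vote over the last `window` labels of one track.
int voteLabelSketch( std::deque<int>& history, int newLabel, size_t window = 9 )
{
    history.push_back( newLabel );
    if( history.size() > window )
        history.pop_front();

    std::map<int,int> counts;
    int best = newLabel, bestCount = 0;
    for( size_t i = 0; i < history.size(); ++i )
    {
        int c = ++counts[ history[i] ];
        if( c > bestCount ) { bestCount = c; best = history[i]; }
    }
    return best;
}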
Example #23
double CDetectionAlgs::BoostingDetection( vector<Rect>& objs,
        const CascadeClassifier& cascade,
        const Mat& img,
        const Rect* confinedArea,
        const double scale,
        Size sSize,
        Size bSize)
{
    double res = (double)cvGetTickCount();
    objs.clear();

    Mat confinedImg;
    if(confinedArea)
        confinedImg = img(*confinedArea);
    else
        confinedImg = img;

    Mat gray, smallImg( cvRound (confinedImg.rows/scale),
                        cvRound(confinedImg.cols/scale),
                        CV_8UC1 );

    if(confinedImg.channels() == 3)
        cvtColor( confinedImg, gray, CV_BGR2GRAY );
    else
        gray = confinedImg;
    resize( gray, smallImg, smallImg.size(), 0, 0, INTER_LINEAR );
    equalizeHist( smallImg, smallImg );

    /////////////////detection/////////////////////////////////////////
    //t = (double)cvGetTickCount();
    const_cast<CascadeClassifier&>(cascade).detectMultiScale(
        smallImg,
        objs,
        1.1,
        2,
        0,
        //|CascadeClassifier::DO_CANNY_PRUNING
        //|CascadeClassifier::FIND_BIGGEST_OBJECT
        //|CascadeClassifier::DO_ROUGH_SEARCH
        //|CascadeClassifier::SCALE_IMAGE,
        sSize,
        bSize);

    ///////////////////////sort///////////////////////////////////////
    if (objs.size() > 0)
    {
        qsort( (void *)&(objs[0]), objs.size(), sizeof(Size), cvSizeCompare );
        // re-position
        if (confinedArea)
        {
            for (int i = 0; i < objs.size(); ++i)
            {
                objs[i].x = objs[i].x*scale+confinedArea->x;
                objs[i].y = objs[i].y*scale+confinedArea->y;
            }
        }
        else
        {
            for (int i = 0; i < objs.size(); ++i)
            {
                objs[i].x = objs[i].x*scale;
                objs[i].y = objs[i].y*scale;
            }
        }

        //scale back
        for ( int i = 0; i < objs.size(); ++i)
        {
            objs[i].width *= scale;
            objs[i].height *= scale;
        }
    }

    res = ((double)cvGetTickCount() - res)
          / ((double)cvGetTickFrequency()*1000.);
    return res;
}
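cvSizeCompare is used with qsort above but not defined in this excerpt; a plausible comparator that sorts detections by area, largest first, could look like this (an assumption -- the real implementation may order differently):

// qsort comparator over cv::Rect: larger area sorts first (assumed behavior).
static int cvSizeCompare( const void* a, const void* b )
{
    const Rect* ra = (const Rect*)a;
    const Rect* rb = (const Rect*)b;
    return rb->width * rb->height - ra->width * ra->height;
}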
int testfaceLib_pThread ( const char* str_video, int trackerType, int multiviewType, int recognizerType, const char* str_facesetxml, int threads, 
						 bool blink, bool smile, bool gender, bool age, bool recog, bool quiet, bool saveface, const char* sfolder, bool bEnableAutoCluster )
{
    FILE* fp_imaginfo = fopen( "imaginfo.txt", "w" );

	bool bAutoFocus = false;
	IplImage *imgAutoFocus = NULL;

	int  sampleRate =1;
	
	if(str_facesetxml == NULL)
		str_facesetxml = "faceset_model.xml";

	int  prob_estimate[7];
	char sState[256];
	EnumViewAngle  viewAngle = (EnumViewAngle)multiviewType;
	//dynamic clustering for smooth ID registration
	//bEnableAutoCluster =  true;

	CxlibFaceAnalyzer faceAnalyzer(viewAngle, (EnumTrackerType)trackerType, blink, smile, gender, age, recog, sampleRate, str_facesetxml, recognizerType, bEnableAutoCluster); 

	/////////////////////////////////////////////////////////////////////////////////////
	//	init GUI window
	const char* str_title = "Face Tester";
	if( ! quiet )
		cvNamedWindow( str_title, CV_WINDOW_AUTOSIZE );

	char sCaptionInfo[256] = "";
	CvFont *pFont = new CvFont;
	cvInitFont(pFont, CV_FONT_HERSHEY_PLAIN, 0.85, 0.85, 0, 1);

	// load GUI smile icon images
	IplImage *pImgSmileBGR;
	IplImage *pImgSmileMask;
	if(age == 0)
	{   // smile icon
		pImgSmileBGR  = cvLoadImage( "smile.bmp" );
		pImgSmileMask = cvLoadImage( "smilemask.bmp", 0 );
	}
	else
	{   // gender/age/smile icons
		pImgSmileBGR  = cvLoadImage( "faceicon.bmp" );
		pImgSmileMask = cvLoadImage( "faceiconMask.bmp", 0 );
	}

	IplImage *pImgSmileBGRA = cvCreateImage( cvSize(pImgSmileBGR->width, pImgSmileBGR->height), IPL_DEPTH_8U, 4 );
	cvCvtColor(pImgSmileBGR, pImgSmileBGRA, CV_BGR2BGRA );

	// open video source
    size_t len = strlen( str_video );
    bool is_piclist = (len >= 4 && 0 == stricmp( str_video + len - 4, ".txt" ));
    CxImageSeqReader* vidcap = NULL;
    if( is_piclist )
        vidcap = new CxPicListReader( str_video );
    else
        vidcap = new CxVideoReader( str_video );
	if( cvGetErrStatus() < 0 )
	{   
		cvSetErrStatus( CV_StsOk );
		return -1;
	}

	// when using camera, set to 640x480, 30fps
	if( isdigit(str_video[0]) != 0 && str_video[1] == '\0' )
	{
		vidcap->width( 640 );
		vidcap->height( 480 );
		vidcap->fps( 30 );
	}

	// print beginning info
	printf( "tracker cascade:  '%s'\n", trackerType== TRA_HAAR ? "haar" : (recognizerType== TRA_SURF ? "surf" : "pf tracker SURF"));
	printf( "face recognizer:  '%s'\n", recognizerType == RECOGNIZER_BOOST_GB240 ? "boost gabor240" : "cascade gloh"  );
	printf( "video:    '%s', %dx%d, %2.1f fps\n", str_video, 
		vidcap->width(), vidcap->height(), vidcap->fps() );

	// set mouse event process
	CxMouseParam mouse_faceparam;
	mouse_faceparam.updated = false;
	mouse_faceparam.play    = true;
	mouse_faceparam.ret_online_collecting = 0;

	static const int MAX_FACES = 16; 
	if(! quiet)
	{
		mouse_faceparam.play    = true;
		mouse_faceparam.updated = false;
		mouse_faceparam.face_num  = faceAnalyzer.getMaxFaceNum();
		mouse_faceparam.rects     = faceAnalyzer.getFaceRects();
		mouse_faceparam.image     = NULL;
		mouse_faceparam.cut_big_face= faceAnalyzer.getBigCutFace();
		mouse_faceparam.typeRecognizer = 0;
		mouse_faceparam.faceRecognizer = &faceAnalyzer;
		mouse_faceparam.ret_online_collecting = 0;
		cvSetMouseCallback(	str_title, my_mouse_callback, (void*)&mouse_faceparam );
		faceAnalyzer.setMouseParam(&mouse_faceparam);
	}

	// init count ticks                   
	int64  ticks, start_ticks, total_ticks;
	int64  tracker_total_ticks;
	double tracker_fps, total_fps; 

	start_ticks         = total_ticks  = 0;
	tracker_total_ticks = 0;
		
	// loop for each frame of a video/camera
	int frames = 0;
	IplImage *pImg = NULL;

	while( ! vidcap->eof() )
	{   
		// capture a video frame
		if( mouse_faceparam.play == true)
			pImg = vidcap->query();
		else 
			continue;

		if ( pImg == NULL )
			break;

		// make a copy, flip if upside-down
		CvImage image( cvGetSize(pImg), pImg->depth, pImg->nChannels );
		if( pImg->origin == IPL_ORIGIN_BL ) //flip live camera's frame
			cvFlip( pImg, image );
		else
			cvCopy( pImg, image );

		// convert to gray_image for face analysis
		CvImage gray_image( image.size(), image.depth(), 1 );
		if( image.channels() == 3 )
			cvCvtColor( image, gray_image, CV_BGR2GRAY );
		else
			cvCopy( image, gray_image );

		///////////////////////////////////////////////////////////////////
		// do face tracking and face recognition
		start_ticks = ticks = cvGetTickCount();	

        if( is_piclist )
            faceAnalyzer.detect(gray_image, prob_estimate, sState);
        else
		    faceAnalyzer.track(gray_image, prob_estimate, sState, image);   // track faces in each frame; recognition runs on a worker pthread
		//faceAnalyzer.detect(gray_image, prob_estimate, sState); // alternative: track and recognize faces in every frame

		int face_num = faceAnalyzer.getFaceNum();

		ticks       = cvGetTickCount() - ticks;
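		// cvGetTickFrequency() returns ticks per microsecond in the C API, so 1e-3 * ticks / freq below is elapsed milliseconds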
		tracker_fps = 1000.0 / ( 1e-3 * ticks / cvGetTickFrequency() );
		tracker_total_ticks += ticks;

		
		//set param for mouse event processing
		if(!quiet)
		{
			mouse_faceparam.face_num = face_num;
			mouse_faceparam.image    = image;
		}

        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "%s  %d", vidcap->filename(), face_num );

		// blink/smile/gender/age/face recognize section
		for( int i=0; i<face_num; i++ )
		{
			// get face rect and id from face tracker
			CvRectItem rectItem = faceAnalyzer.getFaceRect(i);
			CvRect rect = rectItem.rc;
			int    face_trackid = rectItem.fid;
			float  probSmile = faceAnalyzer.getFaceSmileProb(i);
			int    bBlink  = faceAnalyzer.getFaceBlink(i);
			int    bSmile  = faceAnalyzer.getFaceSmile(i);
			int    bGender = faceAnalyzer.getFaceGender(i);
			int    nAgeID  = faceAnalyzer.getFaceAge(i);
			int    nFaceID = faceAnalyzer.getFaceID(i);
			float  fFaceProb= faceAnalyzer.getFaceProb(i);
			
			char *sFaceCaption = NULL;
			char sFaceNameBuff[256];
			char *sFaceName = faceAnalyzer.getFaceName(i);
			if(sFaceName[0] != '\0')
			{
				sprintf(sFaceNameBuff, "%s %.2f", sFaceName, fFaceProb);
				sFaceCaption = sFaceNameBuff;	// caption shows the name plus confidence
			}

			if( ! quiet )
			{
				CvPoint2D32f *landmark6 = NULL;
				sprintf(sCaptionInfo, "FPS:%04d, %s", (int)tracker_fps, sState);

				int trackid = -1; //face_trackid , don't display trackid if -1
				cxlibDrawFaceBlob( image, pFont, trackid, rect, landmark6, probSmile, 
					bBlink, bSmile, bGender, nAgeID, sFaceCaption, NULL,
					pImgSmileBGR, pImgSmileBGRA, pImgSmileMask);
			}

            if( fp_imaginfo != NULL )
                fprintf( fp_imaginfo, "  %d %d %d %d", rect.x, rect.y, rect.width, rect.height );
		}
        if( fp_imaginfo != NULL )
            fprintf( fp_imaginfo, "\n" );

		///////////////////////////////////////////////////////////////////
		total_ticks += (cvGetTickCount() - start_ticks);
		
		// count processed frames
		frames++;

		//auto focus faces
		if(quiet == false && bAutoFocus)
		{
			if(imgAutoFocus)
				cvCopy(image, imgAutoFocus);
			else
				imgAutoFocus = cvCloneImage(image);

			CvRectItem *rects = faceAnalyzer.getFaceRects();
			cxlibAutoFocusFaceImage(imgAutoFocus, image, rects, face_num);
		}

		// next frame if quiet
		if( quiet )
			continue;
		else
		{
			// draw status info for custom interaction
			if(mouse_faceparam.ret_online_collecting == 1)
			{
				sprintf(sCaptionInfo, "Collecting faces for track_id = %d", mouse_faceparam.ret_facetrack_id);
				//draw face collecting region
				cvLine(image, cvPoint(image.width()/4, 0), cvPoint(image.width()/4, image.height()-1), CV_RGB(255,255,0), 2);
				cvLine(image, cvPoint(image.width()*3/4, 0), cvPoint(image.width()*3/4, image.height()-1), CV_RGB(255,255,0), 2);
			}
			else
				sprintf(sCaptionInfo, "FPS:%04d, %s", (int)tracker_fps, sState);

			cxlibDrawCaption( image, pFont, sCaptionInfo);
		}
		
		//show Image
		if (image.width() <= 800)
			cvShowImage( str_title, image );
		else
		{   // display a scaled-down image
			CvImage scale_image (cvSize(800, image.height()*800/image.width()), image.depth(), 3 );
			cvResize (image, scale_image);
			cvShowImage( str_title, scale_image );
		}

		// user interaction
		int key = cvWaitKey(1);
		//int key = cvWaitKey(0);
		if( key == ' ' )     // press space bar to pause the video play
			cvWaitKey( 0 );                           
		else if( key == 27 ) // press 'esc' to exit
			break;	                                   
		else if( key == 'a' )
		{  // add new face name
			if(face_num > 0)
			{   
				CvRect rect = faceAnalyzer.getFaceRect(0).rc;
				int x = rect.x+rect.width/2;
				int y = rect.y+rect.height/2;
				addFaceSet( x, y, &mouse_faceparam);
			}
		}
		else if( key == 'c' )
		{   //enable flag to collect face exemplars for the selected face name
			mouse_faceparam.ret_online_collecting = 1; //enable online face exemplar collecting
		}
		else if( key == 'z' )
			bAutoFocus = !bAutoFocus;
		else if(key >= 0)
		{
			if(mouse_faceparam.ret_online_collecting == 1)
			{   // stop collecting face exemplars
				mouse_faceparam.ret_online_collecting = 0; //disable online face exemplar collecting
				mouse_faceparam.ret_facetrack_id = -1;
			}

			if( key == 's')
			{
				// save faceset xml model
				faceAnalyzer.saveFaceModelXML("faceset_model.xml");
				sprintf(sCaptionInfo, "%s", "saved the face model");
				cxlibDrawCaption( pImg, pFont, sCaptionInfo);
				cvShowImage( str_title, pImg );
				cvWaitKey( 400 ); 
			}
		}
	}

	// print fps info; cvGetTickFrequency() returns ticks per microsecond, so temp is seconds per tick
	float temp    = 1e-6f / cvGetTickFrequency();
	tracker_fps   = 1.0f  / ( tracker_total_ticks * temp / frames );
	
	total_fps = 1.0f / (total_ticks * temp / frames);

	printf( "Total frames:%d  Speed:%.1f fps\n", frames, total_fps);
	printf( "FPS: %.1f ", tracker_fps);

	//save updated faceset model
	if(mouse_faceparam.updated == true)
	{
		sprintf(sCaptionInfo, "%s", "press key 's' to save updated face model or other keys to cancel");
		cxlibDrawCaption( pImg, pFont, sCaptionInfo);
		cvShowImage( str_title, pImg );

		int key = cvWaitKey();
		if( key == 's')
			faceAnalyzer.saveFaceModelXML("faceset_model.xml");
	}

	//save merged face model for dynamic clustering of smoothID
	vFaceSet vMergedFaceSet;
	int minWeight =10; 
	faceAnalyzer.getMergedFaceSet(vMergedFaceSet, minWeight);
	faceAnalyzer.saveFaceModelXML("faceset_modelMerged.xml", &vMergedFaceSet);

	//release global GUI data
	if( !quiet )
		cvDestroyWindow( str_title );

	cvReleaseImage(&pImgSmileBGR);
	cvReleaseImage(&pImgSmileBGRA);
	cvReleaseImage(&pImgSmileMask);
	delete pFont;

    delete vidcap;

    if( fp_imaginfo != NULL )
        fclose( fp_imaginfo );

    return 0;
}
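
A minimal call sketch for testfaceLib_pThread above. TRA_HAAR and RECOGNIZER_BOOST_GB240 appear in the printf lines of the body; 0 is passed for multiviewType because the EnumViewAngle values are not shown in this excerpt:

int demo_run()
{
	return testfaceLib_pThread(
		"0",                    // str_video: a single digit string selects a live camera
		TRA_HAAR,               // trackerType
		0,                      // multiviewType, cast to EnumViewAngle inside
		RECOGNIZER_BOOST_GB240, // recognizerType
		NULL,                   // str_facesetxml: falls back to "faceset_model.xml"
		1,                      // threads
		true, true,             // blink, smile
		true, false,            // gender, age
		true,                   // recog
		false,                  // quiet: keep the GUI window and mouse callback
		false, NULL,            // saveface, sfolder
		true );                 // bEnableAutoCluster
}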
Example #25
int main()
{
	
	double time;
	CvScalar hsv_min;
	CvScalar hsv_max;

#ifdef VIDEO
	CvCapture* capture = cvCreateFileCapture("E:\\Evan Lin\\CLUB\\PingPong\\OPENCV\\Table Tennis.flv");
	//CvCapture *capture = cvCreateFileCapture("E:\\Evan Lin\\CLUB\\PingPong\\OPENCV\\Test\\motionTracking(ZC)\\bouncingBall.avi");
	//CvCapture *capture = cvCreateFileCapture("E:\\Evan Lin\\CLUB\\PingPong\\OPENCV\\PingPongTest.avi");
	//CvCapture *capture = cvCreateFileCapture("E:\\Evan Lin\\CLUB\\PingPong\\OPENCV\\WIN_20150803_103818.MP4");//WIN_20150803_102356.MP4
#else
	CvCapture* capture = cvCreateCameraCapture(0);
#endif
	if (!capture)
	{
		printf("ERROR ACQUIRING VIDEO FEED\n");
		getchar();
		return -1;
	}

	CvSize size = cvSize((int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_WIDTH), (int)cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_HEIGHT));
	IplImage* frame = cvCreateImage(size, IPL_DEPTH_8U, 3);
	IplImage* hsv_frame = cvCreateImage(size, IPL_DEPTH_8U, 3);
	IplImage* thresholded = cvCreateImage(size, IPL_DEPTH_8U, 1);
	cvInitFont(&font, CV_FONT_HERSHEY_COMPLEX, 0.5, 0.5, 0, 1, 1);


	while (1)
	{
#ifdef VIDEO

		cvReleaseCapture(&capture);	// release the previous pass's capture before reopening
		capture = cvCreateFileCapture("E:\\Evan Lin\\CLUB\\PingPong\\OPENCV\\Table Tennis.flv");
		if (!capture)
		{
			printf("ERROR ACQUIRING VIDEO FEED\n");
			getchar();
			return -1;
		}
		while (cvGetCaptureProperty(capture, CV_CAP_PROP_POS_FRAMES) < cvGetCaptureProperty(capture, CV_CAP_PROP_FRAME_COUNT) - 1)
		{
			time = (double)cvGetTickCount();
			frame = cvQueryFrame(capture);
			if (!frame)
			{
				fprintf(stderr, "ERROR: frame is null...\n");
				getchar();
				exit(1);
			}
			cvCvtColor(frame, hsv_frame, CV_BGR2HSV);
			hsv_min = cvScalar(H_MIN, S_MIN, V_MIN, 0);
			hsv_max = cvScalar(H_MAX, S_MAX, V_MAX, 0);
			cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded);
			cvErode(thresholded, thresholded, NULL, 1);
			cvDilate(thresholded, thresholded, NULL, 1);
			//cvSmooth(thresholded, thresholded, CV_GAUSSIAN, 9, 9);

			if (debugMode == true)
			{
				createTrackbars();
				cvNamedWindow("HSV", CV_WINDOW_AUTOSIZE);
				cvShowImage("HSV", thresholded);
			}
			else
			{
				cvDestroyWindow("trackbarControl");
				cvDestroyWindow("HSV");
			}

			if (trackingEnabled)
			{
				searchForMovement(thresholded, frame);
			}

			if (showEnabled)
			{
				time = ((double)cvGetTickCount() - time) / cvGetTickFrequency();	// elapsed microseconds
				double fps = 1000000.0 / time;
				char str[20];
				sprintf(str, "FPS:%.2f", fps);
				cvPutText(frame, str, cvPoint(0, 30), &font, cvScalar(255, 0, 0));
			}

			cvShowImage("Frame1", frame);

			switch (cvWaitKey(33))
			{
			case 27: //'esc' key has been pressed, exit program.
				//cvReleaseMemStorage();
				cvReleaseCapture(&capture);
				cvDestroyAllWindows();
				return 0;
			case 116: //'t' has been pressed. this will toggle tracking
				trackingEnabled = !trackingEnabled;
				if (trackingEnabled == false) printf("Tracking disabled.\n");
				else printf("Tracking enabled.\n");
				break;
			case 100: //'d' has been pressed. this will toggle debug mode
				debugMode = !debugMode;
				if (debugMode == false) printf("Debug mode disabled.\n");
				else printf("Debug mode enabled.\n");
				break;
			case 115: //'s' has been pressed. this will toggle show mode
				showEnabled = !showEnabled;
				if (showEnabled == false) printf("Show mode disabled.\n");
				else printf("Show mode enabled.\n");
				break;
			case 112: //'p' has been pressed. this will pause/resume the code.
				pause = !pause;
				if (pause == true)
				{
					printf("Code paused, press 'p' again to resume\n");
					while (pause == true)
					{
						//stay in this loop until 'p' is pressed again
						switch (cvWaitKey())
						{
						case 112:
							//change pause back to false
							pause = false;
							printf("Code resumed.\n");
							break;
						}
					}
				}
			}
		}
	}
#else
		time = (double)GetTickCount();
		//time = (double)cvGetTickCount();
		frame = cvQueryFrame(capture);
		if (!frame)
		{
			fprintf(stderr, "ERROR: frame is null...\n");
			getchar();
			exit(1);
		}
		cvCvtColor(frame, hsv_frame, CV_BGR2HSV);
		hsv_min = cvScalar(H_MIN, S_MIN, V_MIN, 0);
		hsv_max = cvScalar(H_MAX, S_MAX, V_MAX, 0);
		cvInRangeS(hsv_frame, hsv_min, hsv_max, thresholded);
		cvErode(thresholded, thresholded, NULL, 3);	// three erosion passes collapsed into one call
		cvDilate(thresholded, thresholded, NULL, 1);
		//cvSmooth(thresholded, thresholded, CV_GAUSSIAN, 9, 9);

		if (debugMode == true)
		{
			createTrackbars();
			cvNamedWindow("HSV", CV_WINDOW_AUTOSIZE);
			cvShowImage("HSV", thresholded);
		}
		else
		{
			cvDestroyWindow("trackbarControl");
			cvDestroyWindow("HSV");
		}

		if (trackingEnabled)
		{
			searchForMovement(thresholded, frame);
		}

		if (showEnabled)
		{
			//time = ((double)cvGetTickCount() - time) / cvGetTickFrequency();
			//double fps = 100000.0 / time;
			time = (double)GetTickCount() - time;
			double fps = 1000.0 / time;
			char str[20];
			sprintf(str, "FPS:%.2f", fps);
			cvPutText(frame, str, cvPoint(0, 30), &font, cvScalar(255, 0, 0));
		}

		cvShowImage("Frame1", frame);

		
		}
/*
** ================ BEGIN MAIN SECTION =======================
*/
void target_tracking( int argc, char** argv )
{
    double t1, t2, fps;
    int  targ_selected = 0;
    int  camera_img_height, camera_img_width, camera_img_fps;
    int  waitkey_delay = 2;

    CvMat *image_gray = 0;
    CvMat *image_binary = 0;

    IplConvKernel *morph_kernel;

    CvSeq *contours;

    int i;
    
    pid = (int) getpid();
    /*
    **  See if we can catch signals
    */
    if ( signal(SIGTERM, sig_handler) == SIG_IGN) 
       signal(SIGTERM, SIG_IGN);
    if ( signal(SIGHUP, sig_handler) == SIG_IGN) 
       signal(SIGHUP, SIG_IGN);
    if ( signal(SIGINT, sig_handler) == SIG_IGN) 
       signal(SIGINT, SIG_IGN);

    /*
    **  Set the number of tracked targets to zero
    **   tracked targets are persistent targets for a sequence of frames/images
    **   detected targets are the targets detected from a single frame or image
    */
    num_tracked_targets = 0;

    /*  
    **  Capture images from webcam /dev/video0 
    **   /dev/video0 = 0
    **   /dev/video1 = 1
    */
    if ( argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0])))
    {
       printf(" Capturing image from camera\n");
       camera=cvCaptureFromCAM( argc == 2 ? argv[1][0] - '0' : 0 );
    } 
    else 
    {
       /* 
       **  Capture image from file at command line
       **   useful for playback video and testing
       */
       camera = cvCaptureFromFile( argv[1] );
    }


    /*
    **   Check and see if camera/file capture is valid
    */
    if (!camera) {
        printf("camera or image is null\n");
        return;
    }

    /*
    **  Get camera properties
    */
    camera_img_width = cvGetCaptureProperty(camera, CV_CAP_PROP_FRAME_WIDTH);
    camera_img_height = cvGetCaptureProperty(camera, CV_CAP_PROP_FRAME_HEIGHT);
    camera_img_fps = cvGetCaptureProperty(camera, CV_CAP_PROP_FPS);

    cvSetCaptureProperty( camera, CV_CAP_PROP_EXPOSURE, 500.0);
    cvSetCaptureProperty( camera, CV_CAP_PROP_EXPOSURE, 500.0);
    cvSetCaptureProperty( camera, CV_CAP_PROP_AUTO_EXPOSURE, 0.0);

    /*
    **  Parse the config file
    */
//    T456_parse_vision( "../config/t456-vision.ini" );
    T456_parse_vision( "/usr/local/config/t456-vision.ini" );
    T456_print_camera_and_tracking_settings();

    /*
    **  Start server listening on port 8080
    */
    T456_start_server();

    /*
    **  Setup graphic display windows using OpenCV
    */
#ifdef GRAPHICS
    cvNamedWindow("original", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("binary image", CV_WINDOW_AUTOSIZE);
    cvNamedWindow("contours", CV_WINDOW_AUTOSIZE);
//    cvNamedWindow("original", CV_WINDOW_KEEPRATIO);
printf("camera_img_fps: %d\n", camera_img_fps);
 
    if (camera_img_fps < 0 ) {
       camera_img_fps = 30;
       printf("camera_img_fps: %d\n", camera_img_fps);
    }

    waitkey_delay = (int) ((1.0f / (float) camera_img_fps) * 1000.0f);
    printf("waitkey_delay: %d\n", waitkey_delay);
    if ( (waitkey_delay > 50) || (waitkey_delay == 0) ) waitkey_delay = 2;

    cvInitFont( &font, CV_FONT_HERSHEY_PLAIN, 1.0, 1.0, 0.0, 1, 8);
#endif /* GRAPHICS */

    /*
    **  Construct a morphological kernel
    */
    morph_kernel = cvCreateStructuringElementEx(3, 3, 1, 1, 
                                                CV_SHAPE_RECT, NULL); 
    /*
    **  Allocate working images
    */
    image_gray = cvCreateMat(camera_img_height, camera_img_width, CV_8UC1);
    image_binary = cvCreateMat(camera_img_height, camera_img_width, CV_8UC1);

    /*
    **  Time estimation variable
    */
    t1 = (double)cvGetTickCount();

    /*
    **   Process images until key press
    */

#ifdef GRAPHICS
    while (cvWaitKey(waitkey_delay) < 0)
#else
    while (1)
#endif
    {

       /*  
       ** reset detected targets 
       */
       num_detect_targets = 0;

       /*
       **  Grab initial frame from image or movie file
       */
       image = cvQueryFrame(camera);
       if ( !image ) {
          done();
          return;
       }

       /*
       **  Convert RGB image to Hue and Value images (local function)
       **
       **  Threshold out the lime green color from the hue image
       **    it is a bracket threshold, so two images are produced
       **      
       **  (HUE_MID_THRESH - 15) <  target_color < (HUE_MID_THRESH + 15)
       **
       ** Threshold value image. Because of illumination, the targets should
       **  be very bright in the image.
       **
       **  If pixel value of the image is > VAL_THRESH
       **  then output 255
       **  else output 0
       **
       **  Combine hue1, hue2, and value images with an AND operation
       **   output = hue1 AND hue2 AND value
       **  if a pixel in any of these images is pixel = 0 
       **      then the output is 0
       */

       T456_change_RGB_to_binary( image, image_binary );
        
        
       /*
       **   Use the Morph function to join broken or speckled rectangles
       */
       cvMorphologyEx( image_binary, image_gray, NULL,
                         morph_kernel, CV_MOP_CLOSE, 
                         tracking.morph_closing_iterations);

        /*
        **  Process contours and detect targets of interest
        */
        contours = find_contours( image_gray );

        /*
        **  Process raw target detections and compare with current 
        **  tracked targets
        */
        track_targets_over_time( frame_cnt );

        /*
        **  Rank and select highest scoring target
        */
        if ( num_tracked_targets > 0 )
        {
           T456_calculate_aimpoint( frame_cnt );
           targ_selected = T456_select_main_target( frame_cnt );

#ifdef GRAPHICS
           draw_target_dot( tracked_targets[targ_selected], 
                                 image, CV_RGB(0,255,0) );
#endif

        }

        /*
        **  Print out tracked targets
        */
        printf("%d ", frame_cnt);
        for ( i = 0; i < num_tracked_targets; i++ ) 
        {
             printf("%d ", tracked_targets[i].type);
             printf("%.2f %.2f ", tracked_targets[i].h_angle, 
                                  tracked_targets[i].v_angle);
             printf("%.2f %.2f ", tracked_targets[i].xcenter, 
                                  tracked_targets[i].ycenter);
             printf("(%.2f) ", tracked_targets[i].aspect_ratio);
             printf("(d: %.2f) ", tracked_targets[i].distance);
             printf("%d ", tracked_targets[i].time_tracked);

#ifdef GRAPHICS
             draw_target_center( tracked_targets[i],
                                  image, CV_RGB(255,255,0) );
#endif
        }
        printf("\n");

        /*
        **  pass selected target information into target message
        **   for the webservice
        */
        if ( num_tracked_targets == 0 ) {
           target_message_length =
              snprintf(target_message, sizeof(target_message),
              "%06d,00,000000,000000,000000,0000", frame_cnt);
        } else {
           target_message_length =
              snprintf(target_message, sizeof(target_message),
               "%06d,%02d,%06.2f,%06.2f,%06.1f,1.0", 
                    frame_cnt,
                    tracked_targets[targ_selected].type,
                    tracked_targets[targ_selected].aim_h_angle, 
                    tracked_targets[targ_selected].aim_v_angle,
                    tracked_targets[targ_selected].distance);
        }


#ifdef GRAPHICS
        /*
        **  Display images
        */
        cvShowImage("original",image);
        cvShowImage("binary image",image_binary);
#endif /* GRAPHICS */

        /*  
        ** keep time of processing 
        */
        t2 = (double)cvGetTickCount();
        fps = 1000.0 / ((t2-t1)/(cvGetTickFrequency()*1000.));
        fps_sum = fps_sum + fps;
        frame_cnt++;

#ifdef DIAG
        printf("time: %gms  fps: %.2g\n",(t2-t1)/(cvGetTickFrequency()*1000.),fps);
#endif

        t1 = t2;

        /*
        **  If we catch a stop or kill signal
        */
        if ( STOP ) {
          break;
        }

        /*
        **  Print out a diagnostic image every second
        */
#ifdef GRAPHICS
        if ( (frame_cnt % 15) == 0 )
        {
           if ( tracking.diag == 1 ) {
              sprintf(filename,"/home/panda/Images/frame_%05d_%06d.jpg",
                                                        pid,frame_cnt);
              cvSaveImage(filename, image, 0 );
           }
           if ( tracking.diag == 2 ) {
              sprintf(filename,"/home/panda/Images/frame_%05d_%06d_gray.jpg",
                                                     pid,frame_cnt);
              cvSaveImage(filename, image_gray, 0 );
           }
           if ( tracking.diag == 3 ) {
              sprintf(filename,"/home/panda/Images/frame_%05d_%06d_bin.jpg",
                                                      pid,frame_cnt);
              cvSaveImage(filename, image_binary, 0 );
           }
        }
#endif

    }

cvSaveImage("framegrab.jpg", image, 0 );
cvSaveImage("framegrab_bin.jpg", image_binary, 0 );
cvSaveImage("framegrab_gray.jpg", image_gray, 0 );

    /*
    **  Release camera resources
    */
    cvReleaseCapture(&camera);

    /*
    **  Print out timing information
    */
    done();

    /*
    **  Stop server listening on port 8080
    */
    T456_stop_server();

}
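
The comment block inside target_tracking() describes the color segmentation, but the conversion routine itself is not in this excerpt. A sketch of what T456_change_RGB_to_binary could look like, built only from that description; HUE_MID_THRESH and VAL_THRESH are assumed tunables, not names from the original source:

#define HUE_MID_THRESH 60   /* assumed: lime-green hue center (OpenCV hue runs 0..180) */
#define VAL_THRESH     200  /* assumed: brightness cutoff for illuminated targets */

static void sketch_change_RGB_to_binary( IplImage *rgb, CvArr *binary )
{
    CvSize sz = cvGetSize(rgb);
    IplImage *hsv  = cvCreateImage(sz, IPL_DEPTH_8U, 3);
    IplImage *hue  = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    IplImage *val  = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    IplImage *hue1 = cvCreateImage(sz, IPL_DEPTH_8U, 1);
    IplImage *hue2 = cvCreateImage(sz, IPL_DEPTH_8U, 1);

    cvCvtColor(rgb, hsv, CV_BGR2HSV);
    cvSplit(hsv, hue, NULL, val, NULL);   /* channel 0 = hue, channel 2 = value */

    /* bracket threshold: two binary images around the target hue */
    cvThreshold(hue, hue1, HUE_MID_THRESH - 15, 255, CV_THRESH_BINARY);     /* hue above low bound  */
    cvThreshold(hue, hue2, HUE_MID_THRESH + 15, 255, CV_THRESH_BINARY_INV); /* hue below high bound */

    /* keep only bright pixels in the value channel */
    cvThreshold(val, val, VAL_THRESH, 255, CV_THRESH_BINARY);

    /* output = hue1 AND hue2 AND value */
    cvAnd(hue1, hue2, binary, NULL);
    cvAnd(binary, val, binary, NULL);

    cvReleaseImage(&hsv);  cvReleaseImage(&hue);  cvReleaseImage(&val);
    cvReleaseImage(&hue1); cvReleaseImage(&hue2);
}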
Example #27
File: flow2.c  Project: rito96/3Asemester
int main(int argc, char** argv) {
  CvCapture* capture = 0;
  IplImage* input = 0;
  IplImage* gray = 0;
  IplImage* output = 0;
  IplImage* previous_gray = 0;
  int win_step;
  int template_width, template_height;
  CvPoint2D32f previous_point;
  CvPoint2D32f current_points[6][5];
  double maximum_value;
  double minimum_value;
  double *znccs;
  int p1x, p1y, p2x, p2y;
  CvRect window;
  int64 tick = 0, previous_tick = 0; /* cvGetTickCount() returns int64; int would truncate */
  double now = 0.0;
  CvFont font;
  char buffer[256];
  int i, j;

  if (argc == 1 || (argc == 2 && strlen(argv[1]) == 1 && isdigit(argv[1][0]))) {
    capture = cvCreateCameraCapture(argc == 2 ? argv[1][0] - '0' : 0);
  } else if (argc == 2) {
    capture = cvCreateFileCapture(argv[1]);
  }
  if (!capture) {
    fprintf(stderr, "ERROR: capture is NULL \n");
    return (-1);
  }

  input = cvQueryFrame(capture);
  if (!input) {
    fprintf(stderr, "Could not query frame...\n");
    return (-1);
  }

  gray = cvCreateImage(cvGetSize(input), IPL_DEPTH_8U, 1);
  cvCvtColor(input, gray, CV_BGR2GRAY);
  previous_gray = cvCreateImage(cvGetSize(input), IPL_DEPTH_8U, 1);
  cvCopy(gray, previous_gray, NULL); /* seed the previous frame so the first search compares valid data */

  output = cvCreateImage(cvSize(input->width, input->height), IPL_DEPTH_8U, 3);
  output->origin = input->origin;

  cvNamedWindow("Optical Flow", CV_WINDOW_AUTOSIZE);
  cvMoveWindow("Optical Flow", 200, 100);
  cvResizeWindow("Optical Flow", 640, 480);

  cvInitFont(&font, CV_FONT_HERSHEY_SIMPLEX, 1.0, 1.0, 0.0, 1, 0);

  znccs = (double *) malloc(16 * 16 * sizeof(double));

  template_width = 32;
  template_height = 32;
  win_step = WINDOW_STEP;

  for (;;) {
    input = cvQueryFrame(capture);
    if (!input) {
      fprintf(stderr, "Could not query frame...\n");
      break;
    }
    cvCopy(input, output, NULL);
    cvCvtColor(input, gray, CV_BGR2GRAY);

    for (j = 0; j < 5; j++) {
      for (i = 0; i < 6; i++) {
        previous_point.x = template_width / 2 + 8 * win_step
            + i * (8 * 2 * 3 + 32) + 10;
        previous_point.y = template_height / 2 + 8 * win_step
            + j * (8 * 2 * 3 + 32) + 10;
        p1x = (int) previous_point.x - template_width / 2 - 8 * win_step;
        p1y = (int) previous_point.y - template_height / 2 - 8 * win_step;
        p2x = (int) previous_point.x + template_width / 2 - 1 + 8 * win_step;
        p2y = (int) previous_point.y + template_height / 2 - 1 + 8 * win_step;
        cvRectangle(output, cvPoint(p1x, p1y), cvPoint(p2x, p2y),
            CV_RGB(0, 0, 255), 1, // thickness, -1=filled
            8, // line_type, 8=8-connected
            0);
        search_template(previous_gray, gray, previous_point,
            &current_points[i][j], znccs, &maximum_value, &minimum_value,
            cvSize(template_width, template_height), win_step);
        cvLine(output, cvPoint(previous_point.x, previous_point.y),
            cvPoint(current_points[i][j].x, current_points[i][j].y),
            CV_RGB(255, 0, 0), 1, 8, 0);
        p1x = (int) current_points[i][j].x - template_width / 2;
        p1y = (int) current_points[i][j].y - template_height / 2;
        p2x = (int) current_points[i][j].x + template_width / 2 - 1;
        p2y = (int) current_points[i][j].y + template_height / 2 - 1;
        if (maximum_value > THRESHOLD) {
          cvRectangle(output, cvPoint(p1x, p1y), cvPoint(p2x, p2y),
              CV_RGB(0, 255, 0), 1, // thickness, 1
              8, // line_type, 8=8-connected
              0);
        } else {
          cvRectangle(output, cvPoint(p1x, p1y), cvPoint(p2x, p2y),
              CV_RGB(255, 0, 0), 1, // thickness, 1
              8, // line_type, 8=8-connected
              0);
        }
      }
    }
    cvCopy(gray, previous_gray, NULL);

    sprintf(buffer, "%3.1lfms", now / 1000);
    cvPutText(output, buffer, cvPoint(50, 150), &font, CV_RGB(255, 0, 0));

    cvShowImage("Optical Flow", output);

    // exit if any key is pressed
    if (cvWaitKey(10) >= 0) {
      break;
    }
    tick = cvGetTickCount();
    now = (tick - previous_tick) / cvGetTickFrequency();
    previous_tick = tick;
  }

  free(znccs);

  cvReleaseImage(&previous_gray);
  cvReleaseImage(&gray);
  cvReleaseImage(&output);

  cvReleaseCapture(&capture);
  cvDestroyWindow("Previous");
  cvDestroyWindow("Optical Flow");

  return (0);
}
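
search_template() is not included in this excerpt; judging from the THRESHOLD test on maximum_value above, it returns a normalized match score. A minimal ZNCC (zero-mean normalized cross-correlation) between two equal-sized 8-bit patches, as a hypothetical helper:

#include <math.h>

static double zncc_patch(const IplImage *a, const IplImage *b,
                         CvPoint pa, CvPoint pb, CvSize sz) {
  double ma = 0, mb = 0, sa = 0, sb = 0, cov = 0;
  int n = sz.width * sz.height, x, y;
  /* first pass: patch means */
  for (y = 0; y < sz.height; y++)
    for (x = 0; x < sz.width; x++) {
      ma += CV_IMAGE_ELEM(a, uchar, pa.y + y, pa.x + x);
      mb += CV_IMAGE_ELEM(b, uchar, pb.y + y, pb.x + x);
    }
  ma /= n; mb /= n;
  /* second pass: variances and covariance of the zero-mean patches */
  for (y = 0; y < sz.height; y++)
    for (x = 0; x < sz.width; x++) {
      double da = CV_IMAGE_ELEM(a, uchar, pa.y + y, pa.x + x) - ma;
      double db = CV_IMAGE_ELEM(b, uchar, pb.y + y, pb.x + x) - mb;
      sa += da * da; sb += db * db; cov += da * db;
    }
  return (sa > 0 && sb > 0) ? cov / sqrt(sa * sb) : 0.0; /* score in [-1, 1] */
}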
Example #28
File: main.cpp  Project: Barbakas/windage
int main()
{
	char message[100];
	cvNamedWindow("template");
	cvNamedWindow("sampling");
	cvNamedWindow("result");

	// initialize
	int width = WIDTH;
	int height = HEIGHT;
	int startX = (width-TEMPLATE_WIDTH)/2;
	int startY = (height-TEMPLATE_HEIGHT)/2;
	
	IplImage* inputImage = NULL;
	IplImage* grayImage = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 1);
	IplImage* resultImage = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
	IplImage* templateImage = cvCreateImage(cvSize(TEMPLATE_WIDTH, TEMPLATE_HEIGHT), IPL_DEPTH_8U, 1);
	IplImage* samplingImage = NULL;

	// initial template & homography
	CvRect rect = cvRect(startX, startY, TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
	windage::Matrix3 homography(1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0, 1.0);
	homography._13 = startX;
	homography._23 = startY;
	windage::Matrix3 e = homography;

	// Template based Tracking using Inverse Compositional
#if USE_IC
	windage::InverseCompositional* tracker = new windage::InverseCompositional(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
#endif
#if USE_ESM
	windage::HomographyESM* tracker = new windage::HomographyESM(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
#endif
	tracker->SetInitialHomography(e);

	// homography update stack
	std::vector<windage::Matrix3> homographyList;

	// camera
	CvCapture* capture = cvCaptureFromCAM(CV_CAP_ANY);

	bool isTrained = false;
	bool processing = true;
	while(processing)
	{
		inputImage = cvQueryFrame(capture);	// grab + retrieve; cvRetrieveFrame alone never advances the stream
		cvResize(inputImage, resultImage);
		cvCvtColor(resultImage, grayImage, CV_BGR2GRAY);
		
		if(GAUSSIAN_BLUR > 0)
			cvSmooth(grayImage, grayImage, CV_GAUSSIAN, GAUSSIAN_BLUR, GAUSSIAN_BLUR);

		// processing
		int64 startTime = cvGetTickCount();
		
		float error = 0.0;
		float delta = 1.0;
		int iter = 0;
		homographyList.clear();
		for(iter=0; iter<MAX_ITERATION; iter++)
		{
			error = tracker->UpdateHomography(grayImage, &delta);
			homography = tracker->GetHomography();
			homographyList.push_back(homography);

//			if(delta < HOMOGRAPHY_DELTA)
//				break;
		}
		int64 endTime = cvGetTickCount();
		samplingImage = tracker->GetSamplingImage();

		// draw result
		int count = homographyList.size();
		for(int i=0; i<count; i++)
 			DrawResult(resultImage, homographyList[i], CV_RGB(((count-i)/(double)count) * 255.0, (i/(double)count) * 255.0, 0), 1);
 		
		double processingTime = (endTime - startTime)/(cvGetTickFrequency() * 1000.0);
		sprintf_s(message, "processing time : %.2lf ms (%02d iter), error : %.2lf", processingTime, iter, error);
		std::cout << message << std::endl;

#if USE_IC
		windage::Utils::DrawTextToImage(resultImage, cvPoint(5, 15), "Inverse Compositional", 0.6);
#endif
#if USE_ESM
		windage::Utils::DrawTextToImage(resultImage, cvPoint(5, 15), "Efficient Second-order Minimization", 0.6);
#endif
		windage::Utils::DrawTextToImage(resultImage, cvPoint(5, 35), message, 0.6);

		// draw image
		cvShowImage("sampling", samplingImage);
		cvShowImage("result", resultImage);

		char ch = cvWaitKey(1);
		switch(ch)
		{
		case ' ':
			cvSetImageROI(grayImage, rect);
			cvCopyImage(grayImage, templateImage);
			cvShowImage("template", templateImage);
			cvResetImageROI(grayImage);

			tracker->AttatchTemplateImage(templateImage);
			tracker->SetInitialHomography(e);
			tracker->Initialize();
			break;
		case 'r':
		case 'R':
			delete tracker;
			tracker = NULL;
#if USE_IC
			tracker = new windage::InverseCompositional(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
#endif
#if USE_ESM
			tracker = new windage::HomographyESM(TEMPLATE_WIDTH, TEMPLATE_HEIGHT);
#endif
			tracker->SetInitialHomography(e);
			break;
		case 'q':
		case 'Q':
			processing = false;
			break;
		}

	}

	cvReleaseCapture(&capture);

	cvReleaseImage(&grayImage);
	cvReleaseImage(&resultImage);
	cvReleaseImage(&templateImage);
	delete tracker;
	cvDestroyAllWindows();

	return 0;
}
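
DrawResult() is not part of this excerpt. A plausible sketch, assuming windage::Matrix3 exposes row-major entries _11.._33 (consistent with the homography._13 / ._23 initialization above): warp the four template corners through the homography and connect them.

void DrawResultSketch(IplImage* image, windage::Matrix3& h, CvScalar color, int thickness)
{
	CvPoint pts[4];
	double xs[4] = {0, TEMPLATE_WIDTH, TEMPLATE_WIDTH, 0};
	double ys[4] = {0, 0, TEMPLATE_HEIGHT, TEMPLATE_HEIGHT};
	for(int i=0; i<4; i++)
	{
		// homogeneous projection of corner i
		double w = h._31*xs[i] + h._32*ys[i] + h._33;
		pts[i].x = cvRound((h._11*xs[i] + h._12*ys[i] + h._13) / w);
		pts[i].y = cvRound((h._21*xs[i] + h._22*ys[i] + h._23) / w);
	}
	for(int i=0; i<4; i++)
		cvLine(image, pts[i], pts[(i+1)%4], color, thickness);
}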
/*
This file contains a simple implementation of the BlobTrackerAuto virtual interface.
This module just connects three other low-level modules
(foreground estimator + BlobDetector + BlobTracker)
plus some simple code to detect "lost tracking".
A track is lost when the integral of the foreground mask over the blob area has a low value.
*/
#include "_cvaux.h"
#include <time.h>

/* list of Blob Detection modules */
CvBlobDetector* cvCreateBlobDetectorSimple();

/* Tick frequency scaled so that tick deltas divided by FREQ give milliseconds: */
static double FREQ = 1000*cvGetTickFrequency();

#if 1
#define COUNTNUM 100
#define TIME_BEGIN() \
{\
    static double   _TimeSum = 0;\
    static int      _Count = 0;\
    static int      _CountBlob = 0;\
    int64           _TickCount = cvGetTickCount();\

#define TIME_END(_name_,_BlobNum_)    \
    _Count++;\
    _CountBlob+=_BlobNum_;\
    _TimeSum += (cvGetTickCount()-_TickCount)/FREQ;\
    if(m_TimesFile)if(_Count%COUNTNUM==0)\
    {   /* assumed completion of a macro truncated in this excerpt: log running averages */\
        fprintf(m_TimesFile,"%s: %.2f ms per frame, %.2f blobs per frame\n",\
                _name_, _TimeSum/_Count, ((double)_CountBlob)/_Count);\
    }\
}
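
The "lost tracking" rule described in the header comment can be sketched as follows; this is a hypothetical helper based only on that description (minFill is an assumed threshold, and clipping the rectangle to the image is omitted for brevity):

static int SketchIsTrackLost(IplImage* pFGMask, CvBlob* pBlob, double minFill)
{
    /* integrate the foreground mask over the blob rectangle */
    CvRect r = cvRect(cvRound(pBlob->x - pBlob->w*0.5f),
                      cvRound(pBlob->y - pBlob->h*0.5f),
                      cvRound(pBlob->w), cvRound(pBlob->h));
    cvSetImageROI(pFGMask, r);
    double fgPixels = cvSum(pFGMask).val[0] / 255.;   /* mask pixels are 0 or 255 */
    cvResetImageROI(pFGMask);
    /* a low integral relative to the blob area means the track is lost */
    return fgPixels < minFill * r.width * r.height;
}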
int main( int argc, char** argv )
{
	char path[1024];
	IplImage* img;
	help();
	if (argc!=2)
	{
		strcpy(path,"puzzle.png");
		img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
		if (!img)
		{
			printf("\nUsage: mser_sample <path_to_image>\n");
			return 0;
		}
	}
	else
	{
		strcpy(path,argv[1]);
		img = cvLoadImage( path, CV_LOAD_IMAGE_GRAYSCALE );
	}
	
	if (!img)
	{
		printf("Unable to load image %s\n",path);
		return 0;
	}
	IplImage* rsp = cvLoadImage( path, CV_LOAD_IMAGE_COLOR );
	IplImage* ellipses = cvCloneImage(rsp);
	cvCvtColor(img,ellipses,CV_GRAY2BGR);
	CvSeq* contours;
	CvMemStorage* storage= cvCreateMemStorage();
	IplImage* hsv = cvCreateImage( cvGetSize( rsp ), IPL_DEPTH_8U, 3 );
	cvCvtColor( rsp, hsv, CV_BGR2YCrCb );
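	// note: despite the name "hsv", this image holds YCrCb data, and that is what cvExtractMSER receives below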
	CvMSERParams params = cvMSERParams();//cvMSERParams( 5, 60, cvRound(.2*img->width*img->height), .25, .2 );

	double t = (double)cvGetTickCount();
	cvExtractMSER( hsv, NULL, &contours, storage, params );
	t = cvGetTickCount() - t;
	printf( "MSER extracted %d contours in %g ms.\n", contours->total, t/((double)cvGetTickFrequency()*1000.) );
	uchar* rsptr = (uchar*)rsp->imageData;
	// draw mser with different color
	for ( int i = contours->total-1; i >= 0; i-- )
	{
		CvSeq* r = *(CvSeq**)cvGetSeqElem( contours, i );
		for ( int j = 0; j < r->total; j++ )
		{
			CvPoint* pt = CV_GET_SEQ_ELEM( CvPoint, r, j );
			rsptr[pt->x*3+pt->y*rsp->widthStep] = bcolors[i%9][2];
			rsptr[pt->x*3+1+pt->y*rsp->widthStep] = bcolors[i%9][1];
			rsptr[pt->x*3+2+pt->y*rsp->widthStep] = bcolors[i%9][0];
		}
	}
	// fit an ellipse to each region (cvFitEllipse2 seems to have an error or something?)
	for ( int i = 0; i < contours->total; i++ )
	{
		CvContour* r = *(CvContour**)cvGetSeqElem( contours, i );
		CvBox2D box = cvFitEllipse2( r );
		box.angle=(float)CV_PI/2-box.angle;
		
		if ( r->color > 0 )
			cvEllipseBox( ellipses, box, colors[9], 2 );
		else
			cvEllipseBox( ellipses, box, colors[2], 2 );
			
	}

	cvSaveImage( "rsp.png", rsp );

	cvNamedWindow( "original", 0 );
	cvShowImage( "original", img );
	
	cvNamedWindow( "response", 0 );
	cvShowImage( "response", rsp );

	cvNamedWindow( "ellipses", 0 );
	cvShowImage( "ellipses", ellipses );

	cvWaitKey(0);

	cvDestroyWindow( "original" );
	cvDestroyWindow( "response" );
	cvDestroyWindow( "ellipses" );
	cvReleaseImage(&rsp);
	cvReleaseImage(&img);
	cvReleaseImage(&ellipses);
	cvReleaseMemStorage(&storage);

	return 0;
}
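
bcolors and colors are not defined in this excerpt; judging from the indexing above (i%9 into bcolors, plus colors[2] and colors[9]), palettes like these hypothetical ones are all that is required:

// hypothetical palettes sized to match the indexing in the example above
static const uchar bcolors[9][3] =
{   // {B, G, R} triplets written directly into rsp's pixel data
    {  0,   0, 255}, {  0, 128, 255}, {  0, 255, 255},
    {  0, 255,   0}, {255, 128,   0}, {255, 255,   0},
    {255,   0,   0}, {255,   0, 255}, {255, 255, 255}
};

static const CvScalar colors[] =
{   // at least 10 entries, since colors[9] is used for bright regions
    {{0,0,255}}, {{0,128,255}}, {{0,255,255}}, {{0,255,0}}, {{255,128,0}},
    {{255,255,0}}, {{255,0,0}}, {{255,0,255}}, {{255,255,255}}, {{128,128,128}}
};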