Example #1
0
void OptFlowThread::run()
{
	// Thread body: continuously grab frames from `cap`, compute dense
	// Farneback optical flow between consecutive grayscale frames, draw
	// the flow field and emit the annotated frame to listeners.
	if( !cap.isOpened() )
		return;

	Mat prevgray;
	Mat gray;
	Mat flow;
	Mat cflow; 
	Mat frame;

	while(true)
	{
		cap >> frame;

		// Bug fix: at end-of-stream (or camera loss) the capture yields an
		// empty Mat; cvtColor on an empty input throws. Stop cleanly instead.
		if( frame.empty() )
			break;

		cvtColor(frame,gray,COLOR_BGR2GRAY);

		// Skip the very first iteration: no previous frame to flow against yet.
		if(prevgray.data)
		{
			//Use Gunnar Farneback algorithm to compute optical flow.
			calcOpticalFlowFarneback(prevgray,gray,flow,0.5,3,15,3,5,1.2,0);
			cvtColor(prevgray,cflow,COLOR_GRAY2BGR);

			//Draw green points.
			drawOptFlowMap(flow,cflow,16,3,Scalar(0,255,0));
			// NOTE(review): emits a pointer to a local Mat — receiver must copy
			// before the next loop iteration overwrites cflow; confirm slot does so.
			emit NewOptFlowFrame(&cflow);
		}

		// O(1) buffer flip: current frame becomes previous for the next pass.
		std::swap(prevgray,gray);
		waitKey(30);
	}

	return;
}
Example #2
0
	// Process one incoming frame: resize it into a two-slot ping-pong buffer,
	// compute dense Farneback optical flow against the previous frame, draw
	// the flow field, display it and optionally record it to "flow.avi".
	// @param image  incoming frame (any size; presumably grayscale — the
	//               writer is opened with isColor=false; TODO confirm)
	// @param record when true, lazily opens the video writer and appends frames
	void operator()(const cv::Mat& image,bool record)
	{
		// no image no fun
		if (image.empty()) return;

		// Ping-pong indices: cF receives the new frame, pF holds the previous one.
		int cF = currentFrame ? 1 : 0;
		int pF = currentFrame ? 0 : 1;

		// resize the image
		cv::resize(image,frames[cF],cv::Size(image.cols*scale,image.rows*scale),0,0,cv::INTER_LINEAR);

		std::cout << "Current: " << cF << " Previous: " << pF << std::endl;
		std::cout << "Image " << image.size() << " c:" << image.channels() << " > " << frames[cF].size() << std::endl;

		// Lazily open the writer the first time recording is requested.
		if (record && !output.isOpened())
		{
			// fourcc
			int fourCC = FourCC<'X','V','I','D'>::value;
			// NOTE(review): the original comment claimed 1 fps, but the writer
			// is actually opened at 25 fps; last argument 'false' = non-color.
			if (output.open("flow.avi",fourCC,25,frames[cF].size(),false))
				{
					std::cout << "capture file opened" << std::endl;
				}		
		}

		// make a copy for the initial frame
		if (frames[pF].empty())
			frames[cF].copyTo(frames[pF]);

		// Reset any previous flow result before recomputing.
		if (!flow.empty())
			flow = cv::Mat::zeros(flow.size(),flow.type());

		// calculate dense optical flow
		cv::calcOpticalFlowFarneback(frames[pF],frames[cF],flow,.5,2,8,3,7,1.5,0);

		// we can't draw into the frame!
		cv::Mat outImg = frames[cF].clone();

		drawOptFlowMap(flow,outImg,8,cv::Scalar::all(255));

		cv::imshow("Flow",outImg);

		// flip the buffers
		currentFrame = !currentFrame;
		
		// record the frame
		if (record && output.isOpened())
			output.write(outImg);
		
	}
// Estimate scene depth from a light field by rendering two horizontally
// displaced pinhole views and computing the dense optical flow (disparity)
// between them; the raw flow field is returned as the depth proxy.
// NOTE(review): `lightfield` is passed by value, which copies the object —
// a const& would avoid that, but the out-of-view declaration must match.
Mat DepthEstimator1::estimateDepth(const LightFieldPicture lightfield)
{
	// render images
	ImageRenderer3 renderer = ImageRenderer3();
	renderer.setLightfield(lightfield);
	renderer.setAlpha(ALPHA);

	// Left view: pinhole shifted 5 units left of center.
	const Vec2i leftPosition = Vec2i(-5,0);
	renderer.setPinholePosition(leftPosition);
	Mat image1 = renderer.renderImage();
	// Convert to 8-bit grayscale, as required by calcOpticalFlowFarneback.
	cvtColor(image1, image1, CV_RGB2GRAY);
	image1.convertTo(image1, CV_8UC1, 255.0);
		
	// Right view: pinhole shifted 5 units right of center.
	const Vec2i rightPosition = Vec2i(5,0);
	renderer.setPinholePosition(rightPosition);
	Mat image2 = renderer.renderImage();
	cvtColor(image2, image2, CV_RGB2GRAY);
	image2.convertTo(image2, CV_8UC1, 255.0);

	// compute optical flow
	Mat opticalFlow;
	calcOpticalFlowFarneback(image1, image2, opticalFlow, 0.5, 3, 15, 3, 5, 1.2, 0);

	// Visualization overlay on a copy of the left view.
	// NOTE(review): flowMap is single-channel here, so Scalar(0, 255, 0)
	// draws with intensity 0 (black), not green — confirm this is intended.
	Mat flowMap;
	image1.copyTo(flowMap);
	drawOptFlowMap(opticalFlow, flowMap, 16, 1.5, Scalar(0, 255, 0));

	/* start of debugging code */
	const int windowFlags = WINDOW_NORMAL;
	const string window1 = "image1";
	namedWindow(window1, windowFlags);
	imshow(window1, image1);

	const string window2 = "image2";
	namedWindow(window2, windowFlags);
	imshow(window2, image2);
	
	const string window3 = "optical flow";
	namedWindow(window3, windowFlags);
	imshow(window3, flowMap);

	//cout << "optical flow = " << opticalFlow << endl;
	
	// Blocks until a key is pressed (debugging only).
	waitKey(0);
	/* end of debugging code */

	return opticalFlow;
}
Example #4
0
int main( int argc, char** argv )
{
    CvCapture* capture = cvCreateCameraCapture(0);
    CvMat* prevgray = 0, *gray = 0, *flow = 0, *cflow = 0;
    (void)argc; (void)argv;

    help();

    if( !capture )
        return -1;

    cvNamedWindow("flow", 1);

    for(;;)
    {
        int firstFrame = gray == 0;
        IplImage* frame = cvQueryFrame(capture);
        if(!frame)
            break;
        if(!gray)
        {
            gray = cvCreateMat(frame->height, frame->width, CV_8UC1);
            prevgray = cvCreateMat(gray->rows, gray->cols, gray->type);
            flow = cvCreateMat(gray->rows, gray->cols, CV_32FC2);
            cflow = cvCreateMat(gray->rows, gray->cols, CV_8UC3);
        }
        cvCvtColor(frame, gray, CV_BGR2GRAY);

        if( !firstFrame )
        {
            cvCalcOpticalFlowFarneback(prevgray, gray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);
            cvCvtColor(prevgray, cflow, CV_GRAY2BGR);
            drawOptFlowMap(flow, cflow, 16, 1.5, CV_RGB(0, 255, 0));
            cvShowImage("flow", cflow);
        }
        if(cvWaitKey(30)>=0)
            break;
        {
        CvMat* temp;
        CV_SWAP(prevgray, gray, temp);
        }
    }
    cvReleaseCapture(&capture);
    return 0;
}
// Compute a per-pixel time-to-contact (TTC) map from dense optical flow
// between the previous and current grayscale frames, fill the outgoing
// ttc_matrix_ message, and build a false-color visualization of the map.
void VisualContactCatadioptric::computeTTC(){
    // variables initialized in each processing loop, memory management taken care of
    // by itself?
    float f1, f2, f;

    cv_ptr->image.copyTo(gray);

    ttc_matrix_.height = gray.rows;
    ttc_matrix_.width = gray.cols;

    // First call has no previous frame: skip the flow/TTC computation.
    if( !prevgray.empty() )
    {
        cv::calcOpticalFlowFarneback(prevgray, gray, uflow, GF_pyr_scale_, GF_levels_, GF_winsize_, GF_iterations_, GF_poly_n_, GF_poly_s_, 0);

        cv::cvtColor(prevgray, cflow, cv::COLOR_GRAY2BGR);
        uflow.copyTo(flow);
        drawOptFlowMap(cv::Scalar(0, 255, 0));

        for (int ii=0; ii<uflow.rows; ii++){
            for (int jj=0; jj<uflow.cols; jj++){
                // Per-pixel divergence terms relative to the image center
                // (u0, v0); division by zero on the center row/column yields
                // inf, which is filtered by the isinf/isnan guard below.
                f1 = -uflow.at<cv::Vec2f>(ii,jj)[0]/(jj-u0);
                f2 = uflow.at<cv::Vec2f>(ii,jj)[1]/(ii-v0);
                f = 1/(f2+f1);

                // NOTE(review): bitwise '|' works here (both operands are
                // bool) but '||' is conventional; left as-is.
                if (!( isinf(f) | isnan(f) ) ){
                    ttcMap.at<float>(ii,jj) = abs(f);
                }

                // NOTE(review): when f is inf/nan, the value pushed below is
                // the stale ttcMap entry from a previous frame — confirm
                // that carrying over the last valid TTC is intended.
                ttc_matrix_.array.push_back(ttcMap.at<float>(ii,jj));

            }
        }

        cv::Mat adjMap;

        // Scale TTC values into 0-255 for display.
        ttcMap.convertTo(adjMap, CV_8UC1, 255.0/TTC_disp_scale_);

        applyColorMap(adjMap, falseColorsMap, cv::COLORMAP_JET);
    }
    // why swap, could just copy gray to prevgray, right?
    // (swap avoids a full image copy; gray is overwritten next call anyway)
    std::swap(prevgray, gray);
}
Example #6
0
// Pedestrian-tracking demo dispatching between several methods selected on
// the command line: argv[1] = algorithm (template_tracking, LK_tracking,
// farneback, camshift_kalman, background_lk, background_kalman),
// argv[2] = input format (video | image sequence), argv[3] = input file.
int main(int argc, char *argv[])
{

    // Check that the user supplied the required parameters; usage text
    // (in French) is printed otherwise.
    if(argc <= 3)
    {
        std::cout<<"---------------------------------------"<<std::endl<<
                   "Veuillez rentrer la methode choisie :  "<<std::endl<<
                   "- template_tracking"                    <<std::endl<<
                   "- LK_tracking"                          <<std::endl<<
                   "- farneback"                            <<std::endl<<
                   "- camshift_kalman"                      <<std::endl<<
                   "- background_lk"                        <<std::endl<<
                   "- background_kalman"         <<std::endl<<std::endl<<
                   "Le type de format : "                   <<std::endl<<
                   "- video"                                <<std::endl<<
                   "- image"                     <<std::endl<<std::endl<<
                   "Le nom du fichier d'input"              <<std::endl<<
                   "---------------------------------------"<<std::endl;
        std::exit(EXIT_FAILURE);
    }



    //------------------VARIABLES----------------------------------------------//


    // Shared variables

    choiceAlgo algo;
    formatVideo format;
    std::string inputFileName(argv[3]);
    int nbTrames = 501;
    double fps = 0;

    std::vector<cv::Mat> sequence;

    cv::Mat previousSequence;

    int nbPedestrians = 0;

    // HOG pedestrian detector with the stock people model.
    cv::HOGDescriptor hog;
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

    std::vector<cv::Rect> detectedPedestrian;


    // HOG + Good features to track + LK
    std::vector<std::vector<cv::Point2f>> featuresDetected;
    std::vector<std::vector<cv::Point2f>> previousFeaturesDetected;



    // HOG + Template tracking
    std::vector<cv::Rect> boxes;
    std::vector<cv::Rect> previousBoxes;



    // Optical flow farneback
    cv::Mat flow;
    cv::Mat imGray;
    cv::Mat imGrayPrev;



    //camshift and kalman filter
    std::vector<cv::MatND> backProj;
    std::vector<cv::Rect> roiHogDetected;
    std::vector<cv::Rect> roiCamShift;
    std::vector<bool> detected;
    std::vector<cv::Mat> hist;
    std::vector<cv::RotatedRect> rectCamShift;
    cv::Point2f rect_points[4];


    //--------------------------------------------------------------------------------------//
    // Background subtraction; for LK tracking and goodFeaturesToTrack see above
    trackingOption tracking;
    cv::Ptr<cv::BackgroundSubtractor> pMOG2;
    std::vector<cv::Rect> detectedPedestrianFiltered;

    // Kalman filter: state = (x, y, vx, vy), measurement = (x, y).
    cv::KalmanFilter KF(4,2,0,CV_32F);
    cv::Mat_<float> measurement(2,1);
    cv::Mat prediction;
    cv::Mat estimated;

    pMOG2 = cv::createBackgroundSubtractorMOG2();


    // With the added Haar cascade classifier
    std::vector<std::vector<cv::Rect>> rect_upper_body;
    cv::CascadeClassifier classifier;
    std::string upper_body_cascade_name = "haarcascade_fullbody.xml";

    if(!classifier.load(upper_body_cascade_name))
    {
        std::cout<<"le fichier "<<upper_body_cascade_name<<" ne peut etre charge"<<std::endl;
        return -1;
    }

    //--------------------------------------------------------------------------------------//


    // Background subtraction and Kalman
    bool initKalman = true;
    cv::KalmanFilter Kalman(4,2,0,CV_32F);
    cv::Mat_<float> measurmt(2,1);
    cv::Mat predict;
    cv::Mat estim;

    // Constant-velocity transition model:
    // [1 0 1 0; 0 1 0 1; 0 0 1 0; 0 0 0 1]
    Kalman.transitionMatrix.at<float>(0,0) = 1;
    Kalman.transitionMatrix.at<float>(0,1) = 0;
    Kalman.transitionMatrix.at<float>(0,2) = 1;
    Kalman.transitionMatrix.at<float>(0,3) = 0;
    Kalman.transitionMatrix.at<float>(1,0) = 0;
    Kalman.transitionMatrix.at<float>(1,1) = 1;
    Kalman.transitionMatrix.at<float>(1,2) = 0;
    Kalman.transitionMatrix.at<float>(1,3) = 1;
    Kalman.transitionMatrix.at<float>(2,0) = 0;
    Kalman.transitionMatrix.at<float>(2,1) = 0;
    Kalman.transitionMatrix.at<float>(2,2) = 1;
    Kalman.transitionMatrix.at<float>(2,3) = 0;
    Kalman.transitionMatrix.at<float>(3,0) = 0;
    Kalman.transitionMatrix.at<float>(3,1) = 0;
    Kalman.transitionMatrix.at<float>(3,2) = 0;
    Kalman.transitionMatrix.at<float>(3,3) = 1;

    measurmt.setTo(cv::Scalar(0));

    cv::setIdentity(Kalman.measurementMatrix);
    cv::setIdentity(Kalman.processNoiseCov, cv::Scalar::all(1e-4));
    cv::setIdentity(Kalman.measurementNoiseCov, cv::Scalar::all(1e-1));
    cv::setIdentity(Kalman.errorCovPost, cv::Scalar::all(.1));


    cv::Rect rectK;
    cv::Rect prevRectK;

    // Video acquisition: parse algorithm and format from the command line.
    algo = detectAlgo(std::string(argv[1]));
    format = detectFormat(std::string(argv[2]));





    //------------------VIDEO--------------------------------------------------//

    if(format == SEQUENCE_IMAGE)
        sequence.resize(nbTrames);
    else if(format == VIDEO)
        std::cout<<"video"<<std::endl;

    extractVideoData(sequence, format, inputFileName, nbTrames, fps);

    cv::namedWindow("Video", cv::WINDOW_AUTOSIZE);



    //------------------VIDEO-PROCESSING---------------------------------------//

    for(int i=0;i<nbTrames;i++)
    {

        // Previous frame (first iteration falls back to the current frame).
        if(i>0)
            previousSequence = sequence[i-1];
        else
            previousSequence = sequence[i];




        ///------------------HOG + Good Features to track + LK-----------------//

        else if prohibited here; branch 1 of the algorithm dispatch:
        // Re-detect with HOG every 20 frames, track features with
        // Lucas-Kanade in between.
        if(algo == HOG_GOODFEATURESTOTRACK_LK)
        {
            if(i%20 == 0)
            {
                detectedPedestrian = hogDetection(sequence[i], hog);
                nbPedestrians = detectedPedestrian.size();

                if(nbPedestrians != 0)
                {
                    featuresDetected = featuresDetection(sequence[i], detectedPedestrian);
                    previousFeaturesDetected.resize(featuresDetected.size());
                    previousFeaturesDetected = featuresDetected;
                }
            }
            else if(previousFeaturesDetected.size() != 0)
            {
                featuresDetected = lucasKanadeTracking(previousSequence, sequence[i], previousFeaturesDetected);

                previousFeaturesDetected.clear();
                previousFeaturesDetected.resize(featuresDetected.size());
                previousFeaturesDetected = featuresDetected;
            }


            //--------Representation--------------------

            /*
            cv::Scalar myColor;

            for(size_t j=0;j<featuresDetected.size();j++)
            {
                if(j%3 == 0)
                    myColor = cv::Scalar(0,0,cv::RNG().uniform(200,255));

                else if(j%2 == 0)
                    myColor = cv::Scalar(0,cv::RNG().uniform(200,255),0);

                else
                    myColor = cv::Scalar(cv::RNG().uniform(200,255),0,0);

                for(size_t k=0;k<featuresDetected[j].size();k++)
                {
                    cv::circle(sequence[i], featuresDetected[j][k], 1, myColor,-1);
                }
            }
            */


            // Draw a red bounding box around each tracked feature cluster.
            for(size_t j=0;j<featuresDetected.size();j++)
            {
                cv::rectangle(sequence[i], cv::boundingRect(featuresDetected[j]), cv::Scalar( 0, 0, 255), 2, 8, 0 );
            }


            // Display the video
            cv::imshow("Video", sequence[i]);
        }





        ///------------------HOG + Template Tracking---------------------------//

        // Re-detect with HOG every 20 frames, template-match in between.
        else if(algo == HOG_TEMPLATE_TRACKING)
        {
            if(i%20 == 0)
            {
                detectedPedestrian = hogDetection(sequence[i], hog);
                nbPedestrians = detectedPedestrian.size();

                if(nbPedestrians != 0)
                {
                    boxes = templateTracking(sequence[i], detectedPedestrian, CV_TM_CCORR_NORMED);
                    previousBoxes.resize(boxes.size());
                    previousBoxes = boxes;
                }
            }
            else if(previousBoxes.size() != 0)
            {
                boxes = templateTracking(sequence[i], previousBoxes, CV_TM_CCORR_NORMED);

                previousBoxes.clear();
                previousBoxes.resize(boxes.size());
                previousBoxes = boxes;
            }

            //--------Representation--------------------

            for(size_t j=0;j<boxes.size();j++)
            {
                cv::rectangle(sequence[i], boxes[j], cv::Scalar( 0, 0, 255), 2, 8, 0 );
            }


            // Display the video
            cv::imshow("Video", sequence[i]);
        }






        ///------------------HOG + Optical Flow Farneback----------------------//

        // Dense optical flow between consecutive frames; needs i > 0.
        else if(algo == OPT_FLOW_FARNEBACK)
        {
            if(i!=0)
            {
                flow = cv::Mat::zeros(sequence[i].size(), CV_32FC2);
                cv::cvtColor(sequence[i], imGray, CV_BGR2GRAY);
                cv::cvtColor(sequence[i-1], imGrayPrev, CV_BGR2GRAY);

                cv::calcOpticalFlowFarneback(imGrayPrev, imGray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);


                //-----------------Representation------------------------------//

                drawOptFlowMap(flow, imGrayPrev, 16, CV_RGB(0, 255, 0)); // test drawing

                // Display the video
                cv::imshow("Video", imGrayPrev);
            }
        }





        ///--------------HOG+Camshift + Kalman Filter--------------------------//

        else if(algo == CAMSHIFT_KALMAN_FILTER)
        {

            //camshift
            // Seed the CamShift ROIs from a HOG detection until one exists.
            if(i%20 == 0 && roiCamShift.size() == 0)
            {
                roiHogDetected = hogDetection(sequence[i], hog);
                refineROI(roiCamShift, detected, roiHogDetected);

                //test
                if(roiCamShift.size() != 0)
                {
                    /*
                    roiCamShift[0].x += 30;
                    roiCamShift[0].width -= 60;
                    roiCamShift[0].y += 40;
                    roiCamShift[0].height -= 100;
                    */
                    // Shrink the ROI toward the body center for a tighter track.
                    roiCamShift[0].x += roiCamShift[0].width/2;
                    roiCamShift[0].width = roiCamShift[0].width/3;
                    //roiCamShift[0].y += roiCamShift[0].height/2;
                    roiCamShift[0].height = roiCamShift[0].height/3;
                }
                //
            }

            backProj = computeProbImage(sequence[i], roiCamShift, hist, detected);

            if (roiCamShift.size() != 0)
                cv::imshow("temp", backProj[0]);
            ///-------Test-Camshift--------------------///

            rectCamShift.resize(roiCamShift.size());

            for(size_t j=0;j<roiCamShift.size();j++)
            {
                /*
                std::cout<<roiCamShift[j]<<std::endl;
                cv::rectangle(backProj[j], roiCamShift[j], cv::Scalar( 255, 0, 0), 2, 8, 0 ); //DEBUG
                cv::imshow("before camshift", backProj[j]);
                cv::waitKey(0);
                */
                // MeanShift result drawn in green, CamShift result in red.
                cv::Rect rectMeanShift;
                rectMeanShift = roiCamShift[j];
                cv::meanShift(backProj[j], rectMeanShift, cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));

                cv::rectangle(sequence[i], rectMeanShift, cv::Scalar( 0, 255, 0), 2, 8, 0 );

                rectCamShift[j] = cv::CamShift(backProj[j], roiCamShift[j], cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));
                rectCamShift[j].points(rect_points);


                for(int k = 0; k < 4; k++)
                    cv::line(sequence[i], rect_points[k], rect_points[(k+1)%4], cv::Scalar( 0, 0, 255), 2, 8);
            }
            ///----------------------------------------///

            //-----------------Representation----------------------------------//

            // Draw the tracked ROIs (blue).


            for(size_t j=0;j<roiCamShift.size();j++)
                cv::rectangle(sequence[i], roiCamShift[j], cv::Scalar( 255, 0, 0), 2, 8, 0 );

            // Display the video
            cv::imshow("Video", sequence[i]);
        }






        ///------------------BACKGROUND-SUBTRACTION----------------------------//

        else if(algo == BACKGROUND_LK)
        {
            if(i%10 == 0) // equals 0 for the test
            {
                backgroundSubstractionDetection(sequence[i], detectedPedestrianFiltered, pMOG2, tracking);
            }

            // First phase: extract features and (re)initialize the Kalman filter.
            if(tracking == GOOD_FEATURES_TO_TRACK)
            {
                featuresDetected.resize(detectedPedestrianFiltered.size());
                featuresDetected = featuresDetection(sequence[i], detectedPedestrianFiltered);
                previousFeaturesDetected.resize(featuresDetected.size());
                previousFeaturesDetected = featuresDetected;

                tracking = LUCAS_KANADE;

                // Constant-velocity transition model (see Kalman above).
                KF.transitionMatrix.at<float>(0,0) = 1;
                KF.transitionMatrix.at<float>(0,1) = 0;
                KF.transitionMatrix.at<float>(0,2) = 1;
                KF.transitionMatrix.at<float>(0,3) = 0;
                KF.transitionMatrix.at<float>(1,0) = 0;
                KF.transitionMatrix.at<float>(1,1) = 1;
                KF.transitionMatrix.at<float>(1,2) = 0;
                KF.transitionMatrix.at<float>(1,3) = 1;
                KF.transitionMatrix.at<float>(2,0) = 0;
                KF.transitionMatrix.at<float>(2,1) = 0;
                KF.transitionMatrix.at<float>(2,2) = 1;
                KF.transitionMatrix.at<float>(2,3) = 0;
                KF.transitionMatrix.at<float>(3,0) = 0;
                KF.transitionMatrix.at<float>(3,1) = 0;
                KF.transitionMatrix.at<float>(3,2) = 0;
                KF.transitionMatrix.at<float>(3,3) = 1;

                measurement.setTo(cv::Scalar(0));

                for(size_t j=0;j<featuresDetected.size();j++)
                {
                    detectedPedestrianFiltered[j] = cv::boundingRect(featuresDetected[j]);
                }

                // NOTE(review): indexes [0] without checking featuresDetected
                // is non-empty — confirm detection always yields at least one box.
                KF.statePre.at<float>(0) = rectCenter(detectedPedestrianFiltered[0]).x;
                KF.statePre.at<float>(1) = rectCenter(detectedPedestrianFiltered[0]).y;
                KF.statePre.at<float>(2) = 0;
                KF.statePre.at<float>(3) = 0;

                cv::setIdentity(KF.measurementMatrix);
                cv::setIdentity(KF.processNoiseCov, cv::Scalar::all(1e-4));
                cv::setIdentity(KF.measurementNoiseCov, cv::Scalar::all(1e-1));
                cv::setIdentity(KF.errorCovPost, cv::Scalar::all(.1));
            }

            // Second phase: track features with LK and update the Kalman filter.
            else if(tracking == LUCAS_KANADE)
            {
                for(size_t j=0;j<featuresDetected.size();j++)
                {
                    detectedPedestrianFiltered[j] = cv::boundingRect(featuresDetected[j]);
                }

                featuresDetected = lucasKanadeTracking(previousSequence, sequence[i], previousFeaturesDetected);

                previousFeaturesDetected.clear();
                previousFeaturesDetected.resize(featuresDetected.size());
                previousFeaturesDetected = featuresDetected;

                prediction = KF.predict();
                cv::Point predictPt(prediction.at<float>(0),prediction.at<float>(1));

                // Get mouse point
                measurement(0) = rectCenter(detectedPedestrianFiltered[0]).x;
                measurement(1) = rectCenter(detectedPedestrianFiltered[0]).y;

                cv::Point measPt(measurement(0),measurement(1));

                // The "correct" phase that is going to use the predicted value and our measurement
                cv::Mat estimated = KF.correct(measurement);
                cv::Point statePt(estimated.at<float>(0),estimated.at<float>(1));

                //cv::circle(sequence[i], measPt, 1, cv::Scalar(0,255,0), 7, 24);
                //cv::circle(sequence[i], predictPt, 1, cv::Scalar(0,255,255), 7, 24);
            }


            //--------Representation--------------------

            if(tracking != NOTHING_TO_TRACK)
                findUpperBody(classifier, sequence[i], detectedPedestrianFiltered, rect_upper_body);

            for(size_t j=0;j<featuresDetected.size();j++)
            {

                for(size_t k=0;k<rect_upper_body[j].size();k++)
                {
                    cv::rectangle(sequence[i], rect_upper_body[j][k], cv::Scalar( 0, 255, 0), 2, 8, 0 );
                }
                //detectedPedestrianFiltered[j] = cv::boundingRect(featuresDetected[j]);
                cv::rectangle(sequence[i], cv::boundingRect(featuresDetected[j]), cv::Scalar( 0, 0, 255), 2, 8, 0 );
            }


            // Display the video
            cv::imshow("Video", sequence[i]);
        }

        else if(algo == BACKGROUND_KALMAN)
        {
            // Re-run background subtraction every `refresh` frames.
            int refresh = 6;

            if(i%refresh == 0)
            {
                backgroundSubstractionDetection(sequence[i], detectedPedestrianFiltered, pMOG2, tracking);
            }
            // One-time Kalman state seeding from the first detection.
            if(initKalman && detectedPedestrianFiltered.size() != 0)
            {
                Kalman.statePre.at<float>(0) = rectCenter(detectedPedestrianFiltered[0]).x;
                Kalman.statePre.at<float>(1) = rectCenter(detectedPedestrianFiltered[0]).y;
                Kalman.statePre.at<float>(2) = 0;
                Kalman.statePre.at<float>(3) = 0;

                initKalman = false;
            }

            if(detectedPedestrianFiltered.size() != 0)
            {
                predict = Kalman.predict();
                cv::Point predictPt(predict.at<float>(0),predict.at<float>(1));


                // The "correct" phase that is going to use the predicted value and our measurement
                if(i%refresh == 0)
                {
                    rectK = findROI(predictPt, detectedPedestrianFiltered);

                    if(!fusionRects(prevRectK, rectK))
                    {
                        cv::Point refPoint = rectCenter(rectK);


                        // Get center point
                        measurmt(0) = refPoint.x;
                        measurmt(1) = refPoint.y;


                        cv::Point measPt(measurmt(0),measurmt(1));
                        cv::Mat estim = Kalman.correct(measurmt);
                        cv::Point statePt(estim.at<float>(0),estim.at<float>(1));
                    }

                    prevRectK = rectK;
                }

                // Speed magnitude from the estimated velocity components.
                std::string str = std::to_string(sqrt(pow(Kalman.statePre.at<float>(2), 2)+pow(Kalman.statePre.at<float>(3), 2)));

                //cv::circle(sequence[i], measPt, 1, cv::Scalar(0,255,0), 7, 24);
                cv::circle(sequence[i], predictPt, 1, cv::Scalar(0,255,255), 7, 24);
                cv::putText(sequence[i], str, predictPt, cv::FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,0,0), 2.0);
            }

            cv::imshow("Video", sequence[i]);

        }


        //------------------CLEAR-VARIABLES------------------------------------//

        detectedPedestrian.clear();
        featuresDetected.clear();
        boxes.clear();

        previousSequence.release();

        flow.release();
        imGray.release();
        imGrayPrev.release();

        roiHogDetected.clear();
        backProj.clear();

        //------------------STOP-CONDITIONS------------------------------------//

        if (cv::waitKey((int)(1000/fps)) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
        {
            std::cout << "esc key is pressed by user" << std::endl;
            return 0;
        }
    }

    cv::waitKey(0);
    return 0;
}