Example #1
// Get the next hand position using the Kalman filter; returns {measured point, corrected estimate}
vector<Point2f> visionUtils::getPredictedHandPosition(Point2f currentPoint, int mode)
{
    vector<Point2f> predictedPoint;

    // First predict, to update the internal statePre variable
    Mat prediction = KF.predict();
    Point predictPt(prediction.at<float>(0),prediction.at<float>(1));

    measurement(0) = currentPoint.x;
    measurement(1) = currentPoint.y;

    // The update phase
    Mat estimated = KF.correct(measurement);

    Point statePt(estimated.at<float>(0),estimated.at<float>(1));
    Point measPt(measurement(0),measurement(1));

    middlePointV = measPt;
    kalmanMiddlePointV = statePt;

    predictedPoint.push_back(middlePointV);
    predictedPoint.push_back(kalmanMiddlePointV);

    return predictedPoint;
}
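Example #1 assumes a cv::KalmanFilter KF and a cv::Mat_<float> measurement that live elsewhere in the class. A minimal sketch of that missing setup, reusing the constant-velocity (x, y, vx, vy) model and the noise settings that appear in Example #3 below; the function name and signature are hypothetical:

#include <opencv2/opencv.hpp>

// Hypothetical initializer for the KF / measurement members the snippet uses.
void initHandKalman(cv::KalmanFilter &KF, cv::Mat_<float> &measurement,
                    cv::Point2f start)
{
    KF.init(4, 2, 0, CV_32F);            // state: x, y, vx, vy; measured: x, y
    measurement.create(2, 1);
    measurement.setTo(cv::Scalar(0));

    // Constant-velocity transition model.
    KF.transitionMatrix = (cv::Mat_<float>(4, 4) <<
        1, 0, 1, 0,
        0, 1, 0, 1,
        0, 0, 1, 0,
        0, 0, 0, 1);

    cv::setIdentity(KF.measurementMatrix);
    cv::setIdentity(KF.processNoiseCov, cv::Scalar::all(1e-4));
    cv::setIdentity(KF.measurementNoiseCov, cv::Scalar::all(1e-1));
    cv::setIdentity(KF.errorCovPost, cv::Scalar::all(0.1));

    KF.statePre.at<float>(0) = start.x;  // seed with the first observed position
    KF.statePre.at<float>(1) = start.y;
    KF.statePre.at<float>(2) = 0;
    KF.statePre.at<float>(3) = 0;
}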
Example #2
void Kalman::filter(cv::Point2f center, cv::Mat &image_tracking)
{
    std::ostringstream num_str;
    cv::Mat_<float> measurement(2,1);
    cv::Mat processNoise(4, 1, CV_32F);

    //Kalman
    int state_near_id = getStateIDNearest(center);
    if(state_near_id <= -1)
    {
        return;
    }

    copy(kman, kman_prevs[state_near_id]);
    kman.state(0) = center.x;
    kman.state(1) = center.y;

    //Prediction
    cv::Mat prediction = kman.KF.predict();
    //std::cout<<prediction <<" ";
    cv::Point predictPt(prediction.at<float>(0),prediction.at<float>(1));

    // simulate a noisy measurement: H * state plus Gaussian noise drawn
    // from the measurement noise covariance
    randn( measurement, cv::Scalar::all(0),
           cv::Scalar::all(kman.KF.measurementNoiseCov.at<float>(0)));
    measurement += kman.KF.measurementMatrix*kman.state;
    cv::Point measPt(measurement(0), measurement(1));

    //Correct
    cv::Mat estimated = kman.KF.correct(measurement);
    cv::Point correctPt(estimated.at<float>(0),estimated.at<float>(1));

    //num_str<< i <<"-p";
    AMat::drawCross(predictPt, cv::Scalar(255, 0, 0), 1, image_tracking, num_str.str());
    //num_str.str("");
    //num_str<< i <<"-m";
    AMat::drawCross(measPt, cv::Scalar(0, 255, 0), 1, image_tracking, num_str.str());
    //num_str.str("");
    //num_str<< i <<"-c";
    AMat::drawCross(correctPt, cv::Scalar(0, 0, 255), 1, image_tracking, num_str.str());
    //num_str.str("");
    /*
      for (int i = 0; i < (int)predicts.size()-1; i++) {
      line(image_tracking, predicts[i], predicts[i+1], Scalar(255, 0, 0), 1);
      }
      for (int i = 0; i < (int)measures.size()-1; i++) {
      line(image_tracking, measures[i], measures[i+1], Scalar(0,255,0), 1);
      }
      for (int i = 0; i < (int)corrects.size()-1; i++) {
      line(image_tracking, corrects[i], corrects[i+1], Scalar(0, 0, 255), 1);
      }
    */
    //randn( processNoise, cv::Scalar(0),
    //       cv::Scalar::all(sqrt(kman.KF.processNoiseCov.at<float>(0, 0))));
    //kman.state = kman.KF.transitionMatrix*kman.state + processNoise;
}
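AMat::drawCross is a project helper rather than an OpenCV function. A plausible stand-in matching the call sites above (point, colour, arm length, target image, label); the signature and body are assumptions:

#include <opencv2/opencv.hpp>
#include <string>

// Hypothetical equivalent of the AMat::drawCross helper used in Example #2.
void drawCross(cv::Point center, cv::Scalar color, int d,
               cv::Mat &img, const std::string &label)
{
    // Two diagonal strokes through the centre point.
    cv::line(img, cv::Point(center.x - d, center.y - d),
             cv::Point(center.x + d, center.y + d), color, 1);
    cv::line(img, cv::Point(center.x + d, center.y - d),
             cv::Point(center.x - d, center.y + d), color, 1);
    if (!label.empty())
        cv::putText(img, label, center + cv::Point(5, -5),
                    cv::FONT_HERSHEY_PLAIN, 0.8, color);
}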
Example #3
int main(int argc, char *argv[])
{

    // check whether the user supplied the required arguments
    if(argc <= 3)
    {
        std::cout<<"---------------------------------------"<<std::endl<<
                   "Veuillez rentrer la methode choisie :  "<<std::endl<<
                   "- template_tracking"                    <<std::endl<<
                   "- LK_tracking"                          <<std::endl<<
                   "- farneback"                            <<std::endl<<
                   "- camshift_kalman"                      <<std::endl<<
                   "- background_lk"                        <<std::endl<<
                   "- background_kalman"         <<std::endl<<std::endl<<
                   "Le type de format : "                   <<std::endl<<
                   "- video"                                <<std::endl<<
                   "- image"                     <<std::endl<<std::endl<<
                   "Le nom du fichier d'input"              <<std::endl<<
                   "---------------------------------------"<<std::endl;
        std::exit(EXIT_FAILURE);
    }



    //------------------VARIABLES----------------------------------------------//


    // Common variables

    choiceAlgo algo;
    formatVideo format;
    std::string inputFileName(argv[3]);
    int nbTrames = 501;
    double fps = 0;

    std::vector<cv::Mat> sequence;

    cv::Mat previousSequence;

    int nbPedestrians = 0;

    cv::HOGDescriptor hog;
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

    std::vector<cv::Rect> detectedPedestrian;


    // HOG + Good feature to track + LK
    std::vector<std::vector<cv::Point2f>> featuresDetected;
    std::vector<std::vector<cv::Point2f>> previousFeaturesDetected;



    // HOG + Template tracking
    std::vector<cv::Rect> boxes;
    std::vector<cv::Rect> previousBoxes;



    // Optical flow farneback
    cv::Mat flow;
    cv::Mat imGray;
    cv::Mat imGrayPrev;



    //camshift and kalman filter
    std::vector<cv::MatND> backProj;
    std::vector<cv::Rect> roiHogDetected;
    std::vector<cv::Rect> roiCamShift;
    std::vector<bool> detected;
    std::vector<cv::Mat> hist;
    std::vector<cv::RotatedRect> rectCamShift;
    cv::Point2f rect_points[4];


    //--------------------------------------------------------------------------------------//
    // Background subtraction; for LK / goodFeaturesToTrack tracking see above
    trackingOption tracking;
    cv::Ptr<cv::BackgroundSubtractor> pMOG2;
    std::vector<cv::Rect> detectedPedestrianFiltered;

    cv::KalmanFilter KF(4,2,0,CV_32F);
    cv::Mat_<float> measurement(2,1);
    cv::Mat prediction;
    cv::Mat estimated;

    pMOG2 = cv::createBackgroundSubtractorMOG2();


    // With the added Haar cascade classifier
    // (note: the loaded file is the full-body cascade despite the variable names)
    std::vector<std::vector<cv::Rect>> rect_upper_body;
    cv::CascadeClassifier classifier;
    std::string upper_body_cascade_name = "haarcascade_fullbody.xml";

    if(!classifier.load(upper_body_cascade_name))
    {
        std::cout<<"le fichier "<<upper_body_cascade_name<<" ne peut etre charge"<<std::endl;
        return -1;
    }

    //--------------------------------------------------------------------------------------//


    //Background subtraction and Kalman
    bool initKalman = true;
    cv::KalmanFilter Kalman(4,2,0,CV_32F);
    cv::Mat_<float> measurmt(2,1);
    cv::Mat predict;
    cv::Mat estim;

    // Constant-velocity transition model: x, y, vx, vy.
    Kalman.transitionMatrix = (cv::Mat_<float>(4, 4) <<
        1, 0, 1, 0,
        0, 1, 0, 1,
        0, 0, 1, 0,
        0, 0, 0, 1);

    measurmt.setTo(cv::Scalar(0));

    cv::setIdentity(Kalman.measurementMatrix);
    cv::setIdentity(Kalman.processNoiseCov, cv::Scalar::all(1e-4));
    cv::setIdentity(Kalman.measurementNoiseCov, cv::Scalar::all(1e-1));
    cv::setIdentity(Kalman.errorCovPost, cv::Scalar::all(.1));
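    // Note: processNoiseCov (1e-4) is much smaller than measurementNoiseCov (1e-1),
    // so the filter trusts the constant-velocity model over individual detections,
    // giving smoother tracks that react more slowly to sudden direction changes.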


    cv::Rect rectK;
    cv::Rect prevRectK;

    // video acquisition
    algo = detectAlgo(std::string(argv[1]));
    format = detectFormat(std::string(argv[2]));





    //------------------VIDEO--------------------------------------------------//

    if(format == SEQUENCE_IMAGE)
        sequence.resize(nbTrames);
    else if(format == VIDEO)
        std::cout<<"video"<<std::endl;

    extractVideoData(sequence, format, inputFileName, nbTrames, fps);

    cv::namedWindow("Video", cv::WINDOW_AUTOSIZE);



    //------------------VIDEO-PROCESSING---------------------------------------//

    for(int i=0;i<nbTrames;i++)
    {

        if(i>0)
            previousSequence = sequence[i-1];
        else
            previousSequence = sequence[i];




        ///------------------HOG + Good Features to track + LK-----------------//

        if(algo == HOG_GOODFEATURESTOTRACK_LK)
        {
            if(i%20 == 0)
            {
                detectedPedestrian = hogDetection(sequence[i], hog);
                nbPedestrians = detectedPedestrian.size();

                if(nbPedestrians != 0)
                {
                    featuresDetected = featuresDetection(sequence[i], detectedPedestrian);
                    previousFeaturesDetected.resize(featuresDetected.size());
                    previousFeaturesDetected = featuresDetected;
                }
            }
            else if(previousFeaturesDetected.size() != 0)
            {
                featuresDetected = lucasKanadeTracking(previousSequence, sequence[i], previousFeaturesDetected);

                previousFeaturesDetected.clear();
                previousFeaturesDetected.resize(featuresDetected.size());
                previousFeaturesDetected = featuresDetected;
            }


            //--------Representation--------------------

            /*
            cv::Scalar myColor;

            for(size_t j=0;j<featuresDetected.size();j++)
            {
                if(j%3 == 0)
                    myColor = cv::Scalar(0,0,cv::RNG().uniform(200,255));

                else if(j%2 == 0)
                    myColor = cv::Scalar(0,cv::RNG().uniform(200,255),0);

                else
                    myColor = cv::Scalar(cv::RNG().uniform(200,255),0,0);

                for(size_t k=0;k<featuresDetected[j].size();k++)
                {
                    cv::circle(sequence[i], featuresDetected[j][k], 1, myColor,-1);
                }
            }
            */


            for(size_t j=0;j<featuresDetected.size();j++)
            {
                cv::rectangle(sequence[i], cv::boundingRect(featuresDetected[j]), cv::Scalar( 0, 0, 255), 2, 8, 0 );
            }


            // display the video
            cv::imshow("Video", sequence[i]);
        }





        ///------------------HOG + Template Tracking---------------------------//

        else if(algo == HOG_TEMPLATE_TRACKING)
        {
            if(i%20 == 0)
            {
                detectedPedestrian = hogDetection(sequence[i], hog);
                nbPedestrians = detectedPedestrian.size();

                if(nbPedestrians != 0)
                {
                    boxes = templateTracking(sequence[i], detectedPedestrian, CV_TM_CCORR_NORMED);
                    previousBoxes.resize(boxes.size());
                    previousBoxes = boxes;
                }
            }
            else if(previousBoxes.size() != 0)
            {
                boxes = templateTracking(sequence[i], previousBoxes, CV_TM_CCORR_NORMED);

                previousBoxes.clear();
                previousBoxes.resize(boxes.size());
                previousBoxes = boxes;
            }

            //--------Representation--------------------

            for(size_t j=0;j<boxes.size();j++)
            {
                cv::rectangle(sequence[i], boxes[j], cv::Scalar( 0, 0, 255), 2, 8, 0 );
            }


            // display the video
            cv::imshow("Video", sequence[i]);
        }






        ///------------------HOG + Optical Flow Farneback----------------------//

        else if(algo == OPT_FLOW_FARNEBACK)
        {
            if(i!=0)
            {
                flow = cv::Mat::zeros(sequence[i].size(), CV_32FC2);
                cv::cvtColor(sequence[i], imGray, CV_BGR2GRAY);
                cv::cvtColor(sequence[i-1], imGrayPrev, CV_BGR2GRAY);

                cv::calcOpticalFlowFarneback(imGrayPrev, imGray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);


                //-----------------Representation------------------------------//

                drawOptFlowMap(flow, imGrayPrev, 16, CV_RGB(0, 255, 0)); // test drawing

                // display the video
                cv::imshow("Video", imGrayPrev);
            }
        }





        ///--------------HOG+Camshift + Kalman Filter--------------------------//

        else if(algo == CAMSHIFT_KALMAN_FILTER)
        {

            //camshift
            if(i%20 == 0 && roiCamShift.size() == 0)
            {
                roiHogDetected = hogDetection(sequence[i], hog);
                refineROI(roiCamShift, detected, roiHogDetected);

                //test
                if(roiCamShift.size() != 0)
                {
                    /*
                    roiCamShift[0].x += 30;
                    roiCamShift[0].width -= 60;
                    roiCamShift[0].y += 40;
                    roiCamShift[0].height -= 100;
                    */
                    roiCamShift[0].x += roiCamShift[0].width/2;
                    roiCamShift[0].width = roiCamShift[0].width/3;
                    //roiCamShift[0].y += roiCamShift[0].height/2;
                    roiCamShift[0].height = roiCamShift[0].height/3;
                }
                //
            }

            backProj = computeProbImage(sequence[i], roiCamShift, hist, detected);

            if (roiCamShift.size() != 0)
                cv::imshow("temp", backProj[0]);
            ///-------Test-Camshift--------------------///

            rectCamShift.resize(roiCamShift.size());

            for(size_t j=0;j<roiCamShift.size();j++)
            {
                /*
                std::cout<<roiCamShift[j]<<std::endl;
                cv::rectangle(backProj[j], roiCamShift[j], cv::Scalar( 255, 0, 0), 2, 8, 0 ); //DEBUG
                cv::imshow("before camshift", backProj[j]);
                cv::waitKey(0);
                */
                cv::Rect rectMeanShift;
                rectMeanShift = roiCamShift[j];
                cv::meanShift(backProj[j], rectMeanShift, cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));

                cv::rectangle(sequence[i], rectMeanShift, cv::Scalar( 0, 255, 0), 2, 8, 0 );

                rectCamShift[j] = cv::CamShift(backProj[j], roiCamShift[j], cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));
                rectCamShift[j].points(rect_points);


                for(int k = 0; k < 4; k++)
                    cv::line(sequence[i], rect_points[k], rect_points[(k+1)%4], cv::Scalar( 0, 0, 255), 2, 8);
            }
            ///----------------------------------------///

            //-----------------Representation----------------------------------//

            // draw the tracked rectangles


            for(size_t j=0;j<roiCamShift.size();j++)
                cv::rectangle(sequence[i], roiCamShift[j], cv::Scalar( 255, 0, 0), 2, 8, 0 );

            // display the video
            cv::imshow("Video", sequence[i]);
        }






        ///------------------BACKGROUND-SUBSTRACTION---------------------------//

        else if(algo == BACKGROUND_LK)
        {
            if(i%10 == 0) // == 0 for testing
            {
                backgroundSubstractionDetection(sequence[i], detectedPedestrianFiltered, pMOG2, tracking);
            }

            if(tracking == GOOD_FEATURES_TO_TRACK)
            {
                featuresDetected.resize(detectedPedestrianFiltered.size());
                featuresDetected = featuresDetection(sequence[i], detectedPedestrianFiltered);
                previousFeaturesDetected.resize(featuresDetected.size());
                previousFeaturesDetected = featuresDetected;

                tracking = LUCAS_KANADE;

                // Same constant-velocity transition model as above.
                KF.transitionMatrix = (cv::Mat_<float>(4, 4) <<
                    1, 0, 1, 0,
                    0, 1, 0, 1,
                    0, 0, 1, 0,
                    0, 0, 0, 1);

                measurement.setTo(cv::Scalar(0));

                for(size_t j=0;j<featuresDetected.size();j++)
                {
                    detectedPedestrianFiltered[j] = cv::boundingRect(featuresDetected[j]);
                }

                KF.statePre.at<float>(0) = rectCenter(detectedPedestrianFiltered[0]).x;
                KF.statePre.at<float>(1) = rectCenter(detectedPedestrianFiltered[0]).y;
                KF.statePre.at<float>(2) = 0;
                KF.statePre.at<float>(3) = 0;

                cv::setIdentity(KF.measurementMatrix);
                cv::setIdentity(KF.processNoiseCov, cv::Scalar::all(1e-4));
                cv::setIdentity(KF.measurementNoiseCov, cv::Scalar::all(1e-1));
                cv::setIdentity(KF.errorCovPost, cv::Scalar::all(.1));
            }

            else if(tracking == LUCAS_KANADE)
            {
                for(size_t j=0;j<featuresDetected.size();j++)
                {
                    detectedPedestrianFiltered[j] = cv::boundingRect(featuresDetected[j]);
                }

                featuresDetected = lucasKanadeTracking(previousSequence, sequence[i], previousFeaturesDetected);

                previousFeaturesDetected.clear();
                previousFeaturesDetected.resize(featuresDetected.size());
                previousFeaturesDetected = featuresDetected;

                prediction = KF.predict();
                cv::Point predictPt(prediction.at<float>(0),prediction.at<float>(1));

                // Take the tracked pedestrian's rectangle centre as the measurement
                measurement(0) = rectCenter(detectedPedestrianFiltered[0]).x;
                measurement(1) = rectCenter(detectedPedestrianFiltered[0]).y;

                cv::Point measPt(measurement(0),measurement(1));

                // The "correct" phase that is going to use the predicted value and our measurement
                cv::Mat estimated = KF.correct(measurement);
                cv::Point statePt(estimated.at<float>(0),estimated.at<float>(1));

                //cv::circle(sequence[i], measPt, 1, cv::Scalar(0,255,0), 7, 24);
                //cv::circle(sequence[i], predictPt, 1, cv::Scalar(0,255,255), 7, 24);
            }


            //--------Representation--------------------

            if(tracking != NOTHING_TO_TRACK)
                findUpperBody(classifier, sequence[i], detectedPedestrianFiltered, rect_upper_body);

            for(size_t j=0;j<featuresDetected.size();j++)
            {

                for(size_t k=0;k<rect_upper_body[j].size();k++)
                {
                    cv::rectangle(sequence[i], rect_upper_body[j][k], cv::Scalar( 0, 255, 0), 2, 8, 0 );
                }
                //detectedPedestrianFiltered[j] = cv::boundingRect(featuresDetected[j]);
                cv::rectangle(sequence[i], cv::boundingRect(featuresDetected[j]), cv::Scalar( 0, 0, 255), 2, 8, 0 );
            }


            // display the video
            cv::imshow("Video", sequence[i]);
        }

        else if(algo == BACKGROUND_KALMAN)
        {
            int refresh = 6;

            if(i%refresh == 0)
            {
                backgroundSubstractionDetection(sequence[i], detectedPedestrianFiltered, pMOG2, tracking);
            }
            if(initKalman && detectedPedestrianFiltered.size() != 0)
            {
                Kalman.statePre.at<float>(0) = rectCenter(detectedPedestrianFiltered[0]).x;
                Kalman.statePre.at<float>(1) = rectCenter(detectedPedestrianFiltered[0]).y;
                Kalman.statePre.at<float>(2) = 0;
                Kalman.statePre.at<float>(3) = 0;

                initKalman = false;
            }

            if(detectedPedestrianFiltered.size() != 0)
            {
                predict = Kalman.predict();
                cv::Point predictPt(predict.at<float>(0),predict.at<float>(1));


                // The "correct" phase that is going to use the predicted value and our measurement
                if(i%refresh == 0)
                {
                    rectK = findROI(predictPt, detectedPedestrianFiltered);

                    if(!fusionRects(prevRectK, rectK))
                    {
                        cv::Point refPoint = rectCenter(rectK);


                        // Get center point
                        measurmt(0) = refPoint.x;
                        measurmt(1) = refPoint.y;


                        cv::Point measPt(measurmt(0),measurmt(1));
                        cv::Mat estim = Kalman.correct(measurmt);
                        cv::Point statePt(estim.at<float>(0),estim.at<float>(1));
                    }

                    prevRectK = rectK;
                }

                std::string str = std::to_string(sqrt(pow(Kalman.statePre.at<float>(2), 2)+pow(Kalman.statePre.at<float>(3), 2)));

                //cv::circle(sequence[i], measPt, 1, cv::Scalar(0,255,0), 7, 24);
                cv::circle(sequence[i], predictPt, 1, cv::Scalar(0,255,255), 7, 24);
                cv::putText(sequence[i], str, predictPt, cv::FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,0,0), 2.0);
            }

            cv::imshow("Video", sequence[i]);

        }


        //------------------CLEAR-VARIABLES------------------------------------//

        detectedPedestrian.clear();
        featuresDetected.clear();
        boxes.clear();

        previousSequence.release();

        flow.release();
        imGray.release();
        imGrayPrev.release();

        roiHogDetected.clear();
        backProj.clear();

        //------------------STOP-CONDITIONS------------------------------------//

        if (cv::waitKey((int)(1000/fps)) == 27) // wait roughly one frame period; 'Esc' (27) breaks the loop
        {
            std::cout << "esc key is pressed by user" << std::endl;
            return 0;
        }
    }

    cv::waitKey(0);
    return 0;
}
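The program expects three arguments: the method (argv[1]), the format (argv[2]) and the input file (argv[3]), matching the usage message at the top of main(). A hypothetical invocation, with the binary name assumed:

./tracker background_kalman video input.avi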
Example #4
// Kalman filter
void kalmanFilter()
{
	cv::VideoCapture capture(0);

	//x,y,vx,vy
	//cv::KalmanFilter kalman(4, 2, 0);   // kalman is assumed to be declared at global scope
	cv::setIdentity(kalman.measurementMatrix, cv::Scalar(1.0));
	cv::setIdentity(kalman.processNoiseCov, cv::Scalar::all(1e-5));
	cv::setIdentity(kalman.measurementNoiseCov, cv::Scalar::all(0.1));
	cv::setIdentity(kalman.errorCovPost, cv::Scalar::all(1.0));

    // constant-velocity motion model
	kalman.transitionMatrix = (cv::Mat_<float>(4, 4) << 1,0,1,0,   0,1,0,1,  0,0,1,0,  0,0,0,1);
	// storage for the measurements
	cv::Mat_<float> measurement(2,1); measurement.setTo(cv::Scalar(0));
	// initial values
	kalman.statePre.at<float>(0) = initx;
	kalman.statePre.at<float>(1) = inity;
	kalman.statePre.at<float>(2) = 0;
	kalman.statePre.at<float>(3) = 0;

	cv::namedWindow("KalmanFilter", CV_WINDOW_AUTOSIZE);
	cv::setMouseCallback("KalmanFilter", _onMouse, 0);

	// main loop
    while (!GetAsyncKeyState(VK_ESCAPE)) {
        // grab a frame
		capture >> capframe;
		cv::Mat image = capframe.clone();
 
        // convert to HSV (left disabled)
		//cv::Mat hsv = image.clone();
		//cv::cvtColor(image, hsv, CV_BGR2HSV);

		std::cout << "HSV: (" << h << ", " << s << ", " << v << ")\n";

		// find pixels whose RGB is close to the clicked point's colour and compute their centroid
		int sumX =0, sumY = 0, counter = 0;

		for(int y = 0; y < image.rows; y++)
			for(int x = 0; x < image.cols; x++)
			{
				// count the pixel if the RGB difference is within the threshold
				if(sqrt((red - image.at<cv::Vec3b>(y,x)[2]) * (red - image.at<cv::Vec3b>(y,x)[2]) +
						  (green - image.at<cv::Vec3b>(y,x)[1]) * (green - image.at<cv::Vec3b>(y,x)[1]) +
						  (blue - image.at<cv::Vec3b>(y,x)[0]) * (blue - image.at<cv::Vec3b>(y,x)[0])) <= thresh)
				{
					// highlight the matched pixel
					image.at<cv::Vec3b>(y,x)[2] = 255;

					sumX += x;
					sumY += y;
					counter++;
				}
			}
 
		if(counter > 0)
		{
			// measurement (centroid of the matched pixels)
			int mx = (int)(sumX/counter);
			int my = (int)(sumY/counter);
			measurement(0) = mx;
			measurement(1) = my;
			cv::Point measPt(measurement(0), measurement(1));
			// draw the measurement
			cv::circle(image, measPt, 10, cv::Scalar(0, 0, 255));

			// correction phase
			cv::Mat estimated = kalman.correct(measurement);
			cv::Point statePt(estimated.at<float>(0),estimated.at<float>(1));

        }

		// prediction phase
		cv::Mat prediction = kalman.predict();
		cv::Point predictPt(prediction.at<float>(0),prediction.at<float>(1));
		// draw the prediction
		cv::circle(image, predictPt, 10, cv::Scalar(0, 255, 0));
		cv::circle(image, cv::Point(20, 20), 10, CV_RGB(red, green, blue), 5);
		cv::putText(image,"target", cv::Point(0, 50), cv::FONT_HERSHEY_SIMPLEX, 0.7, CV_RGB(red, green, blue));
  
		std::cout << (int)red << ", " << (int)green << ", " <<  (int)blue << std::endl;
		cv::imshow("KalmanFilter", image);
        cv::waitKey(1);
 
    }
}
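Example #4 depends on globals defined elsewhere (kalman, capframe, initx, inity, red, green, blue, thresh, h, s, v). A guess at the _onMouse callback it registers, which presumably samples the clicked pixel as the colour target; only the names come from the snippet, the declarations and body are assumptions:

#include <opencv2/opencv.hpp>

// Hypothetical globals and mouse callback for Example #4.
cv::KalmanFilter kalman(4, 2, 0);   // the commented-out declaration from the snippet
cv::Mat capframe;
int initx = 0, inity = 0;
int thresh = 30;                    // hypothetical RGB distance threshold
uchar red = 0, green = 0, blue = 0;

void _onMouse(int event, int x, int y, int /*flags*/, void* /*userdata*/)
{
    if (event == cv::EVENT_LBUTTONDOWN && !capframe.empty())
    {
        // Sample the clicked pixel's colour as the new tracking target.
        cv::Vec3b bgr = capframe.at<cv::Vec3b>(y, x);
        blue  = bgr[0];
        green = bgr[1];
        red   = bgr[2];
        initx = x;   // also used to seed kalman.statePre
        inity = y;
    }
}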
Example #5
void detectionsCallback(const hannrs_msgs::VisionDetection::ConstPtr& msg){
  if(msg->positions.size() == 1){
    if(people.empty()){
      if(msg->positions.size() == 1){
// 	double dist = sqrt((msg->positions[0].x - robot.back().position.x)*(msg->positions[0].x - robot.back().position.x) + 
// 	(msg->positions[0].y - robot.back().position.y)*(msg->positions[0].y - robot.back().position.y));
// 	if(dist < 0.35){
// 	}else{
	  KF = cv::KalmanFilter(4,2,0,CV_64F);
	  KF.transitionMatrix = (cv::Mat_<double>(4, 4) << 1,0,1,0, 0,1,0,1, 0,0,1,0, 0,0,0,1);
	  // init...
	  KF.statePre.at<double>(0) = msg->positions[0].x;
	  KF.statePre.at<double>(1) = msg->positions[0].y;
	  KF.statePre.at<double>(2) = 0;
	  KF.statePre.at<double>(3) = 0;
  // 	setIdentity(KF.measurementMatrix);
	  KF.measurementMatrix = (cv::Mat_<double>(2, 4) << 1,0,0,0, 0,1,0,0);
	  setIdentity(KF.processNoiseCov, cv::Scalar::all(0.25));
	  setIdentity(KF.measurementNoiseCov, cv::Scalar::all(0.05));
	  setIdentity(KF.errorCovPost, cv::Scalar::all(.1));
	  	  
	  hannrs_msgs::Person person;
	  person.pose.position = msg->positions[0];
	  person.pose.orientation.w = 1.0;
	  person.status = "standing"; //person cannot enter the scene sitted
	  person.name = "Andre";
	  people.push_back(person);
	  headers.push_back(msg->header);
// 	}
      }
    }else{
      cv::Mat_<double> measurement(2,1); measurement.setTo(cv::Scalar(0));
      double dt = msg->header.stamp.toSec() - headers.back().stamp.toSec();
      
      KF.transitionMatrix = (cv::Mat_<double>(4, 4) << 1,0,dt,0, 0,1,0,dt, 0,0,1,0, 0,0,0,1);
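      // The transition matrix is rebuilt on every callback so the constant-velocity
      // model advances by the real elapsed time dt instead of a fixed step.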
      
      // First predict, to update the internal statePre variable
      cv::Mat prediction = KF.predict();

      double vel_module;
      hannrs_msgs::Person person;
      if(msg->positions.size() == 1){
// 	double dist = sqrt((msg->positions[0].x - robot.back().position.x)*(msg->positions[0].x - robot.back().position.x) + 
// 	(msg->positions[0].y - robot.back().position.y)*(msg->positions[0].y - robot.back().position.y));
// 	
// 	if(dist < 0.35){
// 	}else{
	  // Get detection
	  measurement(0) = msg->positions[0].x;
	  measurement(1) = msg->positions[0].y;
		      
	  cv::Point measPt(measurement(0),measurement(1));
	  
	  // The "correct" phase that is going to use the predicted value and our measurement
	  cv::Mat estimated = KF.correct(measurement);
	  
	  
	  person.pose.position.x = estimated.at<double>(0);
	  person.pose.position.y = estimated.at<double>(1);
	  if(msg->classifications[0] == 2.0){
	    person.pose.orientation = tf::createQuaternionMsgFromYaw(msg->positions[0].z);
	    person.velocity.linear.x = 0;
	    person.velocity.linear.y = 0;
	    vel_module = -1;
	  }else{
	    person.pose.orientation = tf::createQuaternionMsgFromYaw(atan2(estimated.at<double>(3), estimated.at<double>(2)));
	    person.velocity.linear.x = estimated.at<double>(2);
	    person.velocity.linear.y = estimated.at<double>(3);
	    vel_module = sqrt(estimated.at<double>(2)*estimated.at<double>(2) + estimated.at<double>(3)*estimated.at<double>(3));
	  }
// 	}
      }else{
	person.pose.position.x = prediction.at<double>(0);
	person.pose.position.y = prediction.at<double>(1);
	if(people.back().status.compare("sitting") == 0){
	  person.pose.orientation = people.back().pose.orientation;
	  person.velocity.linear.x = 0;
	  person.velocity.linear.y = 0;
	  vel_module = -1;
	}else{
	  person.pose.orientation = tf::createQuaternionMsgFromYaw(atan2(prediction.at<double>(3), prediction.at<double>(2)));
	  person.velocity.linear.x = prediction.at<double>(2);
	  person.velocity.linear.y = prediction.at<double>(3);
	  vel_module = sqrt(prediction.at<double>(2)*prediction.at<double>(2) + prediction.at<double>(3)*prediction.at<double>(3));
	}
      }
      
      if(vel_module == -1){
	person.velocity.linear.x = 0;
	person.velocity.linear.y = 0;
	person.status = "sitting"; //person cannot enter the scene sitted
      }else if(vel_module < 0.15){
	person.velocity.linear.x = 0;
	person.velocity.linear.y = 0;
	person.status = "standing"; //person cannot enter the scene sitted
      }else{
	person.status = "walking";
      }
      
      person.name = "Andre";
      people.push_back(person);
      headers.push_back(msg->header);
    }
  }
}