Esempio n. 1
0
/*
 * ShowGLMult.cpp
 *
 *  Created on: 28/01/2015
 *      Author: pedro
 */

#include "ShowGLMult.h"

// Class-wide static state shared by every ShowGLMult instance
// (GL/GLUT-style callbacks cannot carry a 'this' pointer, hence
// class-level storage).
DetectorMult ShowGLMult::detector = DetectorMult();
Util ShowGLMult::util = Util();
int ShowGLMult::MODOS = 0;
vector<ObjetoMult> ShowGLMult::objetos = vector<ObjetoMult>();
// Initialized as a copy of 'objetos'. Safe here only because both
// definitions live in this translation unit and 'objetos' is defined
// on the previous line (in-TU static init order is sequential).
vector<ObjetoMult> ShowGLMult::ultimasPosicoes = ShowGLMult::objetos;
Kalman ShowGLMult::kalman = Kalman();
ObjetoMult ShowGLMult::endGame = ObjetoMult();
bool ShowGLMult::gameOver = false;

// Copies the detector, the object list and the end-of-game object into
// the class-wide static state, wiring each object to the configuration
// of its board. If the object list does not line up one-to-one with the
// detector's boards, nothing is stored (the static state keeps its
// previous values).
ShowGLMult::ShowGLMult(DetectorMult& detector,vector<ObjetoMult> objetos, ObjetoMult& endGame){
	// Guard: exactly one object per detected board, otherwise bail out.
	if(objetos.size() != detector.getBoards().size())
		return;

	ShowGLMult::detector = detector;
	ShowGLMult::objetos = objetos;
	ShowGLMult::endGame = endGame;

	// Attach each board's configuration to the matching object, then
	// snapshot the configured objects as the "last known positions".
	for(unsigned int idx = 0; idx < ShowGLMult::objetos.size(); ++idx)
		ShowGLMult::objetos.at(idx).setConfig(ShowGLMult::detector.getBoards().at(idx));

	ShowGLMult::ultimasPosicoes = ShowGLMult::objetos;
}
void ShowGLMult::ativarIluminacao(){
	float luzAmbiente[]={0.2,0.2,0.2,1.0};
	float luzDifusa[]={0.9,0.7,0.7,1.0};
Esempio n. 2
0
// Entry point of a pedestrian detection/tracking demo.
//
// Usage: <exe> <method> <format> <input-file>
//   method: template_tracking | LK_tracking | farneback | camshift_kalman
//           | background_lk | background_kalman
//   format: video | image
//
// The helpers used below (hogDetection, featuresDetection,
// lucasKanadeTracking, templateTracking, computeProbImage, refineROI,
// drawOptFlowMap, backgroundSubstractionDetection, findUpperBody,
// rectCenter, findROI, fusionRects, detectAlgo, detectFormat,
// extractVideoData) are declared elsewhere in this project.
int main(int argc, char *argv[])
{

    // Usage check: three positional arguments are required
    // (tracking method, input format, input file name).
    if(argc <= 3)
    {
        std::cout<<"---------------------------------------"<<std::endl<<
                   "Veuillez rentrer la methode choisie :  "<<std::endl<<
                   "- template_tracking"                    <<std::endl<<
                   "- LK_tracking"                          <<std::endl<<
                   "- farneback"                            <<std::endl<<
                   "- camshift_kalman"                      <<std::endl<<
                   "- background_lk"                        <<std::endl<<
                   "- background_kalman"         <<std::endl<<std::endl<<
                   "Le type de format : "                   <<std::endl<<
                   "- video"                                <<std::endl<<
                   "- image"                     <<std::endl<<std::endl<<
                   "Le nom du fichier d'input"              <<std::endl<<
                   "---------------------------------------"<<std::endl;
        std::exit(EXIT_FAILURE);
    }



    //------------------VARIABLES----------------------------------------------//


    // Variables common to every method

    choiceAlgo algo;                      // which algorithm branch to run
    formatVideo format;                   // VIDEO or SEQUENCE_IMAGE
    std::string inputFileName(argv[3]);
    int nbTrames = 501;                   // number of frames processed
    double fps = 0;                       // filled in by extractVideoData

    std::vector<cv::Mat> sequence;        // all frames of the input

    cv::Mat previousSequence;             // frame i-1 (frame i on the first pass)

    int nbPedestrians = 0;

    // HOG people detector with the stock OpenCV SVM coefficients.
    cv::HOGDescriptor hog;
    hog.setSVMDetector(cv::HOGDescriptor::getDefaultPeopleDetector());

    std::vector<cv::Rect> detectedPedestrian;


    // HOG + goodFeaturesToTrack + Lucas-Kanade: one feature set per pedestrian.
    std::vector<std::vector<cv::Point2f>> featuresDetected;
    std::vector<std::vector<cv::Point2f>> previousFeaturesDetected;



    // HOG + template tracking
    std::vector<cv::Rect> boxes;
    std::vector<cv::Rect> previousBoxes;



    // Dense optical flow (Farneback)
    cv::Mat flow;
    cv::Mat imGray;
    cv::Mat imGrayPrev;



    // CamShift + Kalman filter
    std::vector<cv::MatND> backProj;          // per-ROI back-projection images
    std::vector<cv::Rect> roiHogDetected;     // raw HOG detections
    std::vector<cv::Rect> roiCamShift;        // ROIs actively tracked by CamShift
    std::vector<bool> detected;
    std::vector<cv::Mat> hist;                // per-ROI color histograms
    std::vector<cv::RotatedRect> rectCamShift;
    cv::Point2f rect_points[4];               // corners of a rotated rect


    //--------------------------------------------------------------------------------------//
    // Background subtraction; for the LK / goodFeaturesToTrack tracking
    // pieces reused here, see the declarations above.
    trackingOption tracking;
    cv::Ptr<cv::BackgroundSubtractor> pMOG2;
    std::vector<cv::Rect> detectedPedestrianFiltered;

    // Kalman filter for the BACKGROUND_LK branch: 4 state vars, 2 measured.
    cv::KalmanFilter KF(4,2,0,CV_32F);
    cv::Mat_<float> measurement(2,1);
    cv::Mat prediction;
    cv::Mat estimated;

    pMOG2 = cv::createBackgroundSubtractorMOG2();


    // With an additional Haar cascade classifier.
    // NOTE(review): the variable says "upper body" but the file loaded is
    // the FULL-body cascade — confirm which one is intended.
    std::vector<std::vector<cv::Rect>> rect_upper_body;
    cv::CascadeClassifier classifier;
    std::string upper_body_cascade_name = "haarcascade_fullbody.xml";

    if(!classifier.load(upper_body_cascade_name))
    {
        std::cout<<"le fichier "<<upper_body_cascade_name<<" ne peut etre charge"<<std::endl;
        return -1;
    }

    //--------------------------------------------------------------------------------------//


    // Background subtraction + Kalman (BACKGROUND_KALMAN branch)
    bool initKalman = true;
    cv::KalmanFilter Kalman(4,2,0,CV_32F);
    cv::Mat_<float> measurmt(2,1);
    cv::Mat predict;
    cv::Mat estim;

    // Constant-velocity transition model over state [x, y, vx, vy]:
    // x' = x + vx, y' = y + vy, velocities unchanged.
    Kalman.transitionMatrix.at<float>(0,0) = 1;
    Kalman.transitionMatrix.at<float>(0,1) = 0;
    Kalman.transitionMatrix.at<float>(0,2) = 1;
    Kalman.transitionMatrix.at<float>(0,3) = 0;
    Kalman.transitionMatrix.at<float>(1,0) = 0;
    Kalman.transitionMatrix.at<float>(1,1) = 1;
    Kalman.transitionMatrix.at<float>(1,2) = 0;
    Kalman.transitionMatrix.at<float>(1,3) = 1;
    Kalman.transitionMatrix.at<float>(2,0) = 0;
    Kalman.transitionMatrix.at<float>(2,1) = 0;
    Kalman.transitionMatrix.at<float>(2,2) = 1;
    Kalman.transitionMatrix.at<float>(2,3) = 0;
    Kalman.transitionMatrix.at<float>(3,0) = 0;
    Kalman.transitionMatrix.at<float>(3,1) = 0;
    Kalman.transitionMatrix.at<float>(3,2) = 0;
    Kalman.transitionMatrix.at<float>(3,3) = 1;

    measurmt.setTo(cv::Scalar(0));

    // Measurement matrix = identity (we observe x and y directly);
    // standard small noise covariances.
    cv::setIdentity(Kalman.measurementMatrix);
    cv::setIdentity(Kalman.processNoiseCov, cv::Scalar::all(1e-4));
    cv::setIdentity(Kalman.measurementNoiseCov, cv::Scalar::all(1e-1));
    cv::setIdentity(Kalman.errorCovPost, cv::Scalar::all(.1));


    cv::Rect rectK;
    cv::Rect prevRectK;

    // Parse the method and format choices from the command line.
    algo = detectAlgo(std::string(argv[1]));
    format = detectFormat(std::string(argv[2]));





    //------------------VIDEO--------------------------------------------------//

    if(format == SEQUENCE_IMAGE)
        sequence.resize(nbTrames);
    else if(format == VIDEO)
        std::cout<<"video"<<std::endl;

    // Load the frames (and fps) from the input file / image sequence.
    extractVideoData(sequence, format, inputFileName, nbTrames, fps);

    cv::namedWindow("Video", cv::WINDOW_AUTOSIZE);



    //------------------VIDEO-PROCESSING---------------------------------------//

    for(int i=0;i<nbTrames;i++)
    {

        // Previous frame (the current one on the very first iteration).
        if(i>0)
            previousSequence = sequence[i-1];
        else
            previousSequence = sequence[i];




        ///------------------HOG + Good Features to track + LK-----------------//

        if(algo == HOG_GOODFEATURESTOTRACK_LK)
        {
            // Re-run the (expensive) HOG detector every 20 frames; in
            // between, propagate the feature points with Lucas-Kanade.
            if(i%20 == 0)
            {
                detectedPedestrian = hogDetection(sequence[i], hog);
                nbPedestrians = detectedPedestrian.size();

                if(nbPedestrians != 0)
                {
                    featuresDetected = featuresDetection(sequence[i], detectedPedestrian);
                    previousFeaturesDetected.resize(featuresDetected.size());
                    previousFeaturesDetected = featuresDetected;
                }
            }
            else if(previousFeaturesDetected.size() != 0)
            {
                featuresDetected = lucasKanadeTracking(previousSequence, sequence[i], previousFeaturesDetected);

                previousFeaturesDetected.clear();
                previousFeaturesDetected.resize(featuresDetected.size());
                previousFeaturesDetected = featuresDetected;
            }


            //--------Visualization--------------------

            /*
            cv::Scalar myColor;

            for(size_t j=0;j<featuresDetected.size();j++)
            {
                if(j%3 == 0)
                    myColor = cv::Scalar(0,0,cv::RNG().uniform(200,255));

                else if(j%2 == 0)
                    myColor = cv::Scalar(0,cv::RNG().uniform(200,255),0);

                else
                    myColor = cv::Scalar(cv::RNG().uniform(200,255),0,0);

                for(size_t k=0;k<featuresDetected[j].size();k++)
                {
                    cv::circle(sequence[i], featuresDetected[j][k], 1, myColor,-1);
                }
            }
            */


            // Draw one red bounding box around each tracked feature cloud.
            for(size_t j=0;j<featuresDetected.size();j++)
            {
                cv::rectangle(sequence[i], cv::boundingRect(featuresDetected[j]), cv::Scalar( 0, 0, 255), 2, 8, 0 );
            }


            // Show the current frame.
            cv::imshow("Video", sequence[i]);
        }





        ///------------------HOG + Template Tracking---------------------------//

        else if(algo == HOG_TEMPLATE_TRACKING)
        {
            // Same 20-frame refresh scheme, but the in-between tracking is
            // normalized cross-correlation template matching.
            if(i%20 == 0)
            {
                detectedPedestrian = hogDetection(sequence[i], hog);
                nbPedestrians = detectedPedestrian.size();

                if(nbPedestrians != 0)
                {
                    boxes = templateTracking(sequence[i], detectedPedestrian, CV_TM_CCORR_NORMED);
                    previousBoxes.resize(boxes.size());
                    previousBoxes = boxes;
                }
            }
            else if(previousBoxes.size() != 0)
            {
                boxes = templateTracking(sequence[i], previousBoxes, CV_TM_CCORR_NORMED);

                previousBoxes.clear();
                previousBoxes.resize(boxes.size());
                previousBoxes = boxes;
            }

            //--------Visualization--------------------

            for(size_t j=0;j<boxes.size();j++)
            {
                cv::rectangle(sequence[i], boxes[j], cv::Scalar( 0, 0, 255), 2, 8, 0 );
            }


            // Show the current frame.
            cv::imshow("Video", sequence[i]);
        }






        ///------------------HOG + Optical Flow Farneback----------------------//

        else if(algo == OPT_FLOW_FARNEBACK)
        {
            // Needs two frames, so skip the very first iteration.
            if(i!=0)
            {
                flow = cv::Mat::zeros(sequence[i].size(), CV_32FC2);
                cv::cvtColor(sequence[i], imGray, CV_BGR2GRAY);
                cv::cvtColor(sequence[i-1], imGrayPrev, CV_BGR2GRAY);

                cv::calcOpticalFlowFarneback(imGrayPrev, imGray, flow, 0.5, 3, 15, 3, 5, 1.2, 0);


                //-----------------Visualization------------------------------//

                drawOptFlowMap(flow, imGrayPrev, 16, CV_RGB(0, 255, 0)); // debug drawing

                // Show the current frame.
                cv::imshow("Video", imGrayPrev);
            }
        }





        ///--------------HOG+Camshift + Kalman Filter--------------------------//

        else if(algo == CAMSHIFT_KALMAN_FILTER)
        {

            // (Re)acquire a CamShift ROI from HOG only while none is tracked.
            if(i%20 == 0 && roiCamShift.size() == 0)
            {
                roiHogDetected = hogDetection(sequence[i], hog);
                refineROI(roiCamShift, detected, roiHogDetected);

                // Experimental: shrink the ROI toward the torso so the
                // color histogram is less polluted by background.
                if(roiCamShift.size() != 0)
                {
                    /*
                    roiCamShift[0].x += 30;
                    roiCamShift[0].width -= 60;
                    roiCamShift[0].y += 40;
                    roiCamShift[0].height -= 100;
                    */
                    roiCamShift[0].x += roiCamShift[0].width/2;
                    roiCamShift[0].width = roiCamShift[0].width/3;
                    //roiCamShift[0].y += roiCamShift[0].height/2;
                    roiCamShift[0].height = roiCamShift[0].height/3;
                }
                //
            }

            backProj = computeProbImage(sequence[i], roiCamShift, hist, detected);

            if (roiCamShift.size() != 0)
                cv::imshow("temp", backProj[0]);
            ///-------CamShift-experiment--------------///

            rectCamShift.resize(roiCamShift.size());

            for(size_t j=0;j<roiCamShift.size();j++)
            {
                /*
                std::cout<<roiCamShift[j]<<std::endl;
                cv::rectangle(backProj[j], roiCamShift[j], cv::Scalar( 255, 0, 0), 2, 8, 0 ); //DEBUG
                cv::imshow("before camshift", backProj[j]);
                cv::waitKey(0);
                */
                // MeanShift result drawn in green for comparison...
                cv::Rect rectMeanShift;
                rectMeanShift = roiCamShift[j];
                cv::meanShift(backProj[j], rectMeanShift, cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));

                cv::rectangle(sequence[i], rectMeanShift, cv::Scalar( 0, 255, 0), 2, 8, 0 );

                // ...and the CamShift rotated rect in red. Note CamShift also
                // updates roiCamShift[j] in place for the next frame.
                rectCamShift[j] = cv::CamShift(backProj[j], roiCamShift[j], cv::TermCriteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1));
                rectCamShift[j].points(rect_points);


                for(int k = 0; k < 4; k++)
                    cv::line(sequence[i], rect_points[k], rect_points[(k+1)%4], cv::Scalar( 0, 0, 255), 2, 8);
            }
            ///----------------------------------------///

            //-----------------Visualization----------------------------------//

            // Draw the current search windows in blue.


            for(size_t j=0;j<roiCamShift.size();j++)
                cv::rectangle(sequence[i], roiCamShift[j], cv::Scalar( 255, 0, 0), 2, 8, 0 );

            // Show the current frame.
            cv::imshow("Video", sequence[i]);
        }






        ///------------------BACKGROUND-SUBTRACTION----------------------------//

        else if(algo == BACKGROUND_LK)
        {
            // Re-detect via background subtraction every 10 frames
            // (original note: set to 0, i.e. every frame, for testing).
            if(i%10 == 0)
            {
                backgroundSubstractionDetection(sequence[i], detectedPedestrianFiltered, pMOG2, tracking);
            }

            // First phase after a detection: seed feature points and
            // (re)initialize the Kalman filter, then switch to LK mode.
            if(tracking == GOOD_FEATURES_TO_TRACK)
            {
                featuresDetected.resize(detectedPedestrianFiltered.size());
                featuresDetected = featuresDetection(sequence[i], detectedPedestrianFiltered);
                previousFeaturesDetected.resize(featuresDetected.size());
                previousFeaturesDetected = featuresDetected;

                tracking = LUCAS_KANADE;

                // Constant-velocity model, same layout as the Kalman filter above.
                KF.transitionMatrix.at<float>(0,0) = 1;
                KF.transitionMatrix.at<float>(0,1) = 0;
                KF.transitionMatrix.at<float>(0,2) = 1;
                KF.transitionMatrix.at<float>(0,3) = 0;
                KF.transitionMatrix.at<float>(1,0) = 0;
                KF.transitionMatrix.at<float>(1,1) = 1;
                KF.transitionMatrix.at<float>(1,2) = 0;
                KF.transitionMatrix.at<float>(1,3) = 1;
                KF.transitionMatrix.at<float>(2,0) = 0;
                KF.transitionMatrix.at<float>(2,1) = 0;
                KF.transitionMatrix.at<float>(2,2) = 1;
                KF.transitionMatrix.at<float>(2,3) = 0;
                KF.transitionMatrix.at<float>(3,0) = 0;
                KF.transitionMatrix.at<float>(3,1) = 0;
                KF.transitionMatrix.at<float>(3,2) = 0;
                KF.transitionMatrix.at<float>(3,3) = 1;

                measurement.setTo(cv::Scalar(0));

                // Replace each detection box by the bounding box of its features.
                for(size_t j=0;j<featuresDetected.size();j++)
                {
                    detectedPedestrianFiltered[j] = cv::boundingRect(featuresDetected[j]);
                }

                // NOTE(review): [0] is unguarded — if the detector produced
                // zero boxes while tracking == GOOD_FEATURES_TO_TRACK this
                // indexes an empty vector; confirm the helper guarantees >= 1.
                KF.statePre.at<float>(0) = rectCenter(detectedPedestrianFiltered[0]).x;
                KF.statePre.at<float>(1) = rectCenter(detectedPedestrianFiltered[0]).y;
                KF.statePre.at<float>(2) = 0;
                KF.statePre.at<float>(3) = 0;

                cv::setIdentity(KF.measurementMatrix);
                cv::setIdentity(KF.processNoiseCov, cv::Scalar::all(1e-4));
                cv::setIdentity(KF.measurementNoiseCov, cv::Scalar::all(1e-1));
                cv::setIdentity(KF.errorCovPost, cv::Scalar::all(.1));
            }

            // Tracking phase: propagate features with LK and run one
            // Kalman predict/correct cycle on the first target's center.
            else if(tracking == LUCAS_KANADE)
            {
                for(size_t j=0;j<featuresDetected.size();j++)
                {
                    detectedPedestrianFiltered[j] = cv::boundingRect(featuresDetected[j]);
                }

                featuresDetected = lucasKanadeTracking(previousSequence, sequence[i], previousFeaturesDetected);

                previousFeaturesDetected.clear();
                previousFeaturesDetected.resize(featuresDetected.size());
                previousFeaturesDetected = featuresDetected;

                prediction = KF.predict();
                cv::Point predictPt(prediction.at<float>(0),prediction.at<float>(1));

                // Measurement: center of the first tracked bounding box.
                // (The original "mouse point" comment was copy-pasted from a demo.)
                measurement(0) = rectCenter(detectedPedestrianFiltered[0]).x;
                measurement(1) = rectCenter(detectedPedestrianFiltered[0]).y;

                cv::Point measPt(measurement(0),measurement(1));

                // The "correct" phase that is going to use the predicted value and our measurement
                cv::Mat estimated = KF.correct(measurement);
                cv::Point statePt(estimated.at<float>(0),estimated.at<float>(1));

                //cv::circle(sequence[i], measPt, 1, cv::Scalar(0,255,0), 7, 24);
                //cv::circle(sequence[i], predictPt, 1, cv::Scalar(0,255,255), 7, 24);
            }


            //--------Visualization--------------------

            if(tracking != NOTHING_TO_TRACK)
                findUpperBody(classifier, sequence[i], detectedPedestrianFiltered, rect_upper_body);

            for(size_t j=0;j<featuresDetected.size();j++)
            {

                for(size_t k=0;k<rect_upper_body[j].size();k++)
                {
                    cv::rectangle(sequence[i], rect_upper_body[j][k], cv::Scalar( 0, 255, 0), 2, 8, 0 );
                }
                //detectedPedestrianFiltered[j] = cv::boundingRect(featuresDetected[j]);
                cv::rectangle(sequence[i], cv::boundingRect(featuresDetected[j]), cv::Scalar( 0, 0, 255), 2, 8, 0 );
            }


            // Show the current frame.
            cv::imshow("Video", sequence[i]);
        }

        else if(algo == BACKGROUND_KALMAN)
        {
            // Background subtraction + Kalman: re-detect every 'refresh'
            // frames, feed the best matching box center to the filter.
            int refresh = 6;

            if(i%refresh == 0)
            {
                backgroundSubstractionDetection(sequence[i], detectedPedestrianFiltered, pMOG2, tracking);
            }
            // One-time initialization from the first detection.
            if(initKalman && detectedPedestrianFiltered.size() != 0)
            {
                Kalman.statePre.at<float>(0) = rectCenter(detectedPedestrianFiltered[0]).x;
                Kalman.statePre.at<float>(1) = rectCenter(detectedPedestrianFiltered[0]).y;
                Kalman.statePre.at<float>(2) = 0;
                Kalman.statePre.at<float>(3) = 0;

                initKalman = false;
            }

            if(detectedPedestrianFiltered.size() != 0)
            {
                predict = Kalman.predict();
                cv::Point predictPt(predict.at<float>(0),predict.at<float>(1));


                // The "correct" phase that is going to use the predicted value and our measurement
                if(i%refresh == 0)
                {
                    // Pick the detection closest to the prediction; only
                    // correct when it is not merged with the previous box.
                    rectK = findROI(predictPt, detectedPedestrianFiltered);

                    if(!fusionRects(prevRectK, rectK))
                    {
                        cv::Point refPoint = rectCenter(rectK);


                        // Measurement: center of the selected box.
                        measurmt(0) = refPoint.x;
                        measurmt(1) = refPoint.y;


                        cv::Point measPt(measurmt(0),measurmt(1));
                        cv::Mat estim = Kalman.correct(measurmt);
                        cv::Point statePt(estim.at<float>(0),estim.at<float>(1));
                    }

                    prevRectK = rectK;
                }

                // Speed magnitude from the predicted velocity components.
                std::string str = std::to_string(sqrt(pow(Kalman.statePre.at<float>(2), 2)+pow(Kalman.statePre.at<float>(3), 2)));

                //cv::circle(sequence[i], measPt, 1, cv::Scalar(0,255,0), 7, 24);
                cv::circle(sequence[i], predictPt, 1, cv::Scalar(0,255,255), 7, 24);
                cv::putText(sequence[i], str, predictPt, cv::FONT_HERSHEY_PLAIN, 1.0, CV_RGB(255,0,0), 2.0);
            }

            cv::imshow("Video", sequence[i]);

        }


        //------------------CLEAR-VARIABLES------------------------------------//

        detectedPedestrian.clear();
        featuresDetected.clear();
        boxes.clear();

        previousSequence.release();

        flow.release();
        imGray.release();
        imGrayPrev.release();

        roiHogDetected.clear();
        backProj.clear();

        //------------------EXIT-CONDITIONS------------------------------------//

        // NOTE(review): if extractVideoData leaves fps at 0, 1000/fps is inf
        // and the cast to int is undefined behavior — confirm fps is always set.
        if (cv::waitKey((int)(1000/fps)) == 27) //wait for 'esc' key press for 30ms. If 'esc' key is pressed, break loop
        {
            std::cout << "esc key is pressed by user" << std::endl;
            return 0;
        }
    }

    cv::waitKey(0);
    return 0;
}
Esempio n. 3
0
// Runs one tracking iteration over the frame buffer:
//   1. appends the incoming frame to the buffer,
//   2. assigns track identities (Hungarian assignment on the weight
//      matrix built from the Kalman predictions),
//   3. prunes false tracks, runs the Kalman filter, then applies the
//      correction heuristics to the outgoing frame,
//   4. once the buffer holds trackParams->MaxBuffer frames, pops and
//      returns the oldest frame; otherwise returns 0 (no frame ready).
// Relies on file/global state: framesBuf, lsTracks, Identities, trackParams.
// NOTE(review): the MaxTracks and FPS parameters are never read in this
// body — confirm whether they are still needed by callers.
STFrame* Tracking( STFrame* frameDataIn, int MaxTracks,StaticBGModel* BGModel, int FPS ){


	STFrame* frameDataOut = NULL; // output frame (oldest buffered frame)

#ifdef MEDIR_TIEMPOS
	struct timeval ti,tif; // partial (ti) and total (tif) timing marks
	float tiempoParcial;
	gettimeofday(&tif, NULL);
	printf("\n2)Tracking:\n");
	gettimeofday(&ti, NULL);
	printf("\t1)Asignación de identidades\n");
#endif



	////////////// APPEND TO THE BUFFER /////////////
	anyadirAlFinal( frameDataIn, framesBuf);
//	MotionTemplate( framesBuf,Identities );

	////////////// IDENTITY ASSIGNMENT ///////////

	// Solve the track/blob associations with the Hungarian optimization
	// method applied to the weight matrix, using the Kalman predictions.
	// If several blobs resolve to the same track it is labelled 0; flies
	// are linked. Works on buffer positions MAX_BUFFER-1 and MAX_BUFFER-2.

	asignarIdentidades( lsTracks,frameDataIn->Flies);

	ordenarTracks( lsTracks );
#ifdef MEDIR_TIEMPOS
	tiempoParcial= obtenerTiempo( ti, 0);
	printf("\t\t-Tiempo: %5.4g ms\n", tiempoParcial);
	gettimeofday(&ti, NULL);
	fprintf(stderr,"\t2)Filtro de Kalman\n");
#endif

	/////////////// DROP FALSE TRACKS: heuristics applied at the end of the buffer ///
	frameDataIn->numTracks = validarTracks( framesBuf, lsTracks,Identities, trackParams->MaxBlobs, frameDataIn->num_frame );

	/////////////// KALMAN FILTER //////////////
	// The Kalman filter works on position MAX_BUFFER-1 (last appended element).
	Kalman( frameDataIn , Identities, lsTracks, trackParams);

	frameDataIn->Tracks = lsTracks;

#ifdef MEDIR_TIEMPOS
	tiempoParcial= obtenerTiempo( ti, 0);
	printf("\t\t-Tiempo: %5.4g ms\n", tiempoParcial);
#endif

	///////   CORRECTION PHASE: heuristics applied to the outgoing frame /////////

	despertarTrack(framesBuf, lsTracks, Identities );
	ordenarTracks( lsTracks );

	// Buffer full: correct, pop and return the oldest frame.
	if( framesBuf->numeroDeElementos == trackParams->MaxBuffer ){

		corregirTracks(framesBuf,  lsTracks, Identities );


	////////// RELEASE MEMORY  ////////////
		frameDataOut = (STFrame*)liberarPrimero( framesBuf ) ;
		if(!frameDataOut){error(7); exit(1);} // fatal: buffer said full but pop failed

		VisualizarEl( framesBuf, PENULTIMO, BGModel );

#ifdef MEDIR_TIEMPOS
		tiempoParcial = obtenerTiempo( tif , NULL);
		printf("Tracking correcto.Tiempo total %5.4g ms\n", tiempoParcial);
#endif
		return frameDataOut;
	}
	else {
		// Buffer still filling: show progress and return no frame.
		VerEstadoBuffer( frameDataIn->Frame, framesBuf->numeroDeElementos, trackParams->MaxBuffer);
		VisualizarEl( framesBuf, PENULTIMO, BGModel );
#ifdef MEDIR_TIEMPOS
		tiempoParcial = obtenerTiempo( tif , NULL);
		printf("Tracking correcto.Tiempo total %5.4g ms\n", tiempoParcial);
#endif
		return 0; // NOTE(review): 0 as STFrame* — prefer NULL for clarity
	}
}