Example No. 1
void *DetectDescribe::pthreadParallelTracking(int part2Process) {

	std::vector<uchar> status;
	std::vector<float> err;
	cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 15, 0.05);

	std::vector<cv::Point2f> points2Track, temp = std::vector<cv::Point2f>();
	cv::KeyPoint::convert(keypoints[part2Process], points2Track);

	// Calculating movement of features
	if (!points2Track.empty())
	{
		cv::calcOpticalFlowPyrLK(img[part2Process], img2[part2Process],
				points2Track, temp, status, err, cv::Size(5, 5), 3, termcrit);
	}

	return NULL; // a void* function must return a value on every path
}
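
Note: CV_TERMCRIT_ITER/CV_TERMCRIT_EPS and cvSize belong to OpenCV's deprecated C API. A minimal sketch of the equivalent modern construction (same 15-iteration / 0.05-epsilon criteria):

cv::TermCriteria termcrit(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 15, 0.05);
cv::calcOpticalFlowPyrLK(img[part2Process], img2[part2Process],
                         points2Track, temp, status, err, cv::Size(5, 5), 3, termcrit);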
Example No. 2
void trackKlt(
    FramePtr frame_ref,
    FramePtr frame_cur,
    vector<cv::Point2f>& px_ref,
    vector<cv::Point2f>& px_cur,
    vector<Vector3d>& f_ref,
    vector<Vector3d>& f_cur,
    vector<double>& disparities)
{
  const double klt_win_size = 30.0;
  const int klt_max_iter = 30;
  const double klt_eps = 0.001;
  vector<uchar> status;
  vector<float> error;
  vector<float> min_eig_vec; // declared but unused below
  cv::TermCriteria termcrit(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, klt_max_iter, klt_eps);
  cv::calcOpticalFlowPyrLK(frame_ref->img_pyr_[0], frame_cur->img_pyr_[0],
                           px_ref, px_cur,
                           status, error,
                           cv::Size2i(klt_win_size, klt_win_size),
                           4, termcrit, cv::OPTFLOW_USE_INITIAL_FLOW);

  vector<cv::Point2f>::iterator px_ref_it = px_ref.begin();
  vector<cv::Point2f>::iterator px_cur_it = px_cur.begin();
  vector<Vector3d>::iterator f_ref_it = f_ref.begin();
  f_cur.clear(); f_cur.reserve(px_cur.size());
  disparities.clear(); disparities.reserve(px_cur.size());
  for(size_t i=0; px_ref_it != px_ref.end(); ++i)
  {
    if(!status[i])
    {
      px_ref_it = px_ref.erase(px_ref_it);
      px_cur_it = px_cur.erase(px_cur_it);
      f_ref_it = f_ref.erase(f_ref_it);
      continue;
    }
    f_cur.push_back(frame_cur->c2f(px_cur_it->x, px_cur_it->y));
    disparities.push_back(Vector2d(px_ref_it->x - px_cur_it->x, px_ref_it->y - px_cur_it->y).norm());
    ++px_ref_it;
    ++px_cur_it;
    ++f_ref_it;
  }
}
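
A note on the pruning loop above: status[i] follows the original point ordering while the iterators walk the shrinking vectors; the two stay aligned because erase() returns the iterator to the next element. An equivalent index-compaction formulation (a sketch, not from the original project; it shows only the pruning, not the f_cur/disparity bookkeeping):

size_t k = 0;
for (size_t i = 0; i < status.size(); ++i)
{
  if (!status[i])
    continue;
  px_ref[k] = px_ref[i];
  px_cur[k] = px_cur[i];
  f_ref[k] = f_ref[i];
  ++k;
}
px_ref.resize(k); px_cur.resize(k); f_ref.resize(k);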
Example No. 3
vector<Point2f> track_dots(IplImage* gray, IplImage* prevGray, vector<Point2f> points, CvRect* faceBox)
{
	//expands region to search for dots
	faceBox->x -= faceBox->width/2;
	if(faceBox->x < 0)
	{
		faceBox->x = 0;
	}
	faceBox->width *= 2;
	if(faceBox->x + faceBox->width > gray->width)
	{
		faceBox->width = gray->width - faceBox->x;
	}
	faceBox->y -= faceBox->height/2;
	if(faceBox->y < 0)
	{
		faceBox->y = 0;
	}
	faceBox->height *= 2;
	if(faceBox->y + faceBox->height > gray->height)
	{
		faceBox->height = gray->height - faceBox->y;
	}

	cvSetImageROI(gray,*faceBox);
	cvSetImageROI(prevGray,*faceBox);

	vector<Point2f> tempPoints;
	vector<uchar> status;
	vector<float> err;
	TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,1,0.3);
	Size winSize(20,20);

	calcOpticalFlowPyrLK(prevGray, gray, points, tempPoints, status, err, winSize, 0, termcrit, 0);

	cvResetImageROI(gray);
	cvResetImageROI(prevGray);
	return tempPoints;
}
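
Caveat: with cvSetImageROI in effect, calcOpticalFlowPyrLK sees only the sub-images, so the input points must already be ROI-relative, and the returned tempPoints are relative to the ROI origin rather than the full frame. A caller-side sketch (an assumption, not from the original project) mapping the results back to full-image coordinates via the expanded faceBox the function writes through its pointer:

vector<Point2f> tracked = track_dots(gray, prevGray, roiPoints, &faceBox);
for (size_t i = 0; i < tracked.size(); i++) {
	tracked[i].x += faceBox.x; // faceBox now holds the expanded ROI origin
	tracked[i].y += faceBox.y;
}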
Example No. 4
//--------------------------------------------------------------
void testApp::update(){
	bool bNewFrame = false;

	ofBackground(0,0,0);
#ifdef _USE_LIVE_VIDEO
	vidGrabber.update();
	bNewFrame = vidGrabber.isFrameNew();
#else
	vidPlayer.update();
	bNewFrame = vidPlayer.isFrameNew();
#endif
    //do we have a new frame?
    if (bNewFrame){

#ifdef _USE_LIVE_VIDEO
    colorImg.setFromPixels(vidGrabber.getPixels().getData(), 320,240);
#else
    colorImg.setFromPixels(vidPlayer.getPixels().getData(), 320,240);
#endif
        grayImage = colorImg; // convert our color image to a grayscale image
        if (bLearnBakground == true) {
            grayBg = grayImage; // update the background image
            bLearnBakground = false;
        }

		cv::Mat mat_curr(CAM_HEIGHT, CAM_WIDTH, CV_8UC1, grayImage.getPixels().getData(), cv::Mat::AUTO_STEP);
		cv::Mat mat_prev(CAM_HEIGHT, CAM_WIDTH, CV_8UC1, grayBg.getPixels().getData(), cv::Mat::AUTO_STEP);

		if(useFAST){
			vector<cv::KeyPoint> keyPoints;
			cv::FAST(mat_prev, keyPoints,30,true);
			cv::KeyPoint::convert(keyPoints, prev_good_points);
		}
		else  {
			cv::goodFeaturesToTrack(mat_prev,            // input, the image from which we want to know good features to track
									prev_good_points,    // output, the points will be stored in this output vector
									100,                 // max points, maximum number of good features to track
									0.05,                // quality level, "minimal accepted quality of corners", the lower the more points we will get
									10,                  // minDistance, minimum distance between points
									cv::Mat(),           // mask
									4,                   // block size
									false,               // useHarrisDetector, makes tracking a bit better when set to true
									0.04                 // free parameter for harris detector
									);
		}


		cv::TermCriteria termcrit(cv::TermCriteria::COUNT|cv::TermCriteria::EPS,prev_good_points.size(),0.03);


		cv::calcOpticalFlowPyrLK(mat_prev,            // prev image
								mat_curr,             // curr image
								prev_good_points,     // find these points in the new image
								curr_good_points,     // result of found points
								status,               // output status vector, found points are set to 1
								error,                // each point gets an error value (see flag)
								cv::Size(21, 21),     // size of the window at each pyramid level
								0,                    // maxLevel - 0 = no pyramids, > 0 use this level of pyramids
								termcrit,             // termination criteria
								0,                    // flags OPTFLOW_USE_INITIAL_FLOW or OPTFLOW_LK_GET_MIN_EIGENVALS
								0.1                   // minEigThreshold
								);
	    grayBg = grayImage;
	}
}
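
Two fragile spots in the call above: the termination criteria reuse prev_good_points.size() as the iteration cap, and calcOpticalFlowPyrLK asserts if the previous point set is empty (for instance when FAST finds nothing). A more defensive sketch with a fixed iteration count:

if (!prev_good_points.empty()) {
    cv::TermCriteria termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 20, 0.03);
    cv::calcOpticalFlowPyrLK(mat_prev, mat_curr, prev_good_points, curr_good_points,
                             status, error, cv::Size(21, 21), 3, termcrit);
}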
Example No. 5
int main()
{
    bool patternfound = false;
    bool reset = false;
    bool resetAuto = false;
    int nbImages = 0;
    double moyFinale = 0;

    cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
    cv::Size winSize(31, 31);
    
    cv::Mat cameraMatrix, distCoeffs;
    cv::Mat imCalib;
    cv::Mat imCalibColor;
    cv::Mat imCalibNext;
    cv::Mat rvecs, tvecs;
    
    std::vector<cv::Point2f> imagePoints;
    std::vector<cv::Point3f> objectPoints;
    std::vector<cv::Point3f> cubeObjectPoints;
    std::vector<std::vector<cv::Point2f>> chessCornersInit(2);
    std::vector<cv::Point3f> chessCorners3D;
    std::vector<double> distances;
    double moyDistances;
    

    // Create the points to project
    for(int x = 0; x < COLCHESSBOARD; x++)
        for(int y = 0; y < ROWCHESSBOARD; y++)
            objectPoints.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f));

    // Create the cube vertices to project
    cubeObjectPoints.push_back(cv::Point3f(52, 26, 0));
    cubeObjectPoints.push_back(cv::Point3f(156, 26, 0));
    cubeObjectPoints.push_back(cv::Point3f(156, 128, 0));
    cubeObjectPoints.push_back(cv::Point3f(52, 128, 0));
    cubeObjectPoints.push_back(cv::Point3f(52, 26, 104));
    cubeObjectPoints.push_back(cv::Point3f(156, 26, 104));
    cubeObjectPoints.push_back(cv::Point3f(156, 128, 104));
    cubeObjectPoints.push_back(cv::Point3f(52, 128, 104));

    // Create the calibration-pattern corners
    for(int x = 0; x < COLCHESSBOARD; x++)
        for(int y = 0; y < ROWCHESSBOARD; y++)
            chessCorners3D.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f));  

    cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ);

    fs["cameraMatrix"] >> cameraMatrix;
    fs["distCoeffs"] >> distCoeffs;

    fs.release();

    cv::VideoCapture vcap(0); 
    if(!vcap.isOpened()){
        std::cout << "FAIL!" << std::endl;
        return -1;
    }

    cv::Mat *frame = new cv::Mat(cv::Mat::zeros(vcap.get(CV_CAP_PROP_FRAME_HEIGHT), vcap.get(CV_CAP_PROP_FRAME_WIDTH), CV_8UC3));
    
    do
    {
        vcap >> *frame;
    }while(frame->empty());

    osg::ref_ptr<osg::Image> backgroundImage = new osg::Image;
    backgroundImage->setImage(frame->cols, frame->rows, 3,
        GL_RGB, GL_BGR, GL_UNSIGNED_BYTE,
        (uchar*)(frame->data),
        osg::Image::AllocationMode::NO_DELETE, 1);

    // read the scene from the list of file specified commandline args.
    osg::ref_ptr<osg::Group> group = new osg::Group;
    osg::ref_ptr<osg::Node> objet3D = osgDB::readNodeFile("dumptruck.osgt");
    osg::ref_ptr<osg::Camera> cam = createHUD(backgroundImage);
    osg::ref_ptr<osg::PositionAttitudeTransform> pat = new osg::PositionAttitudeTransform;
    osg::ref_ptr<osg::PositionAttitudeTransform> pat2 = new osg::PositionAttitudeTransform;

    // construct the viewer.
    osgViewer::Viewer viewer;

    // add the HUD subgraph.
    group->addChild(cam);
    pat->addChild(pat2);
    pat2->addChild(objet3D);
    group->addChild(pat);

    // set the scene to render
    viewer.setSceneData(group.get());
    viewer.realize();  // set up windows and associated threads.

    char key = 0;
    bool detectionMire = false;

    cv::Mat Rc, C = cv::Mat(3, 1, CV_64F), rotVecInv;
    pat->setScale(osg::Vec3d(0.08, 0.08, 0.08));
    pat->setAttitude(osg::Quat(osg::DegreesToRadians(180.0), osg::Vec3d(1.0, 0.0, 0.0)));
    pat2->setPosition(osg::Vec3d(15.0, 4.0, 5.0));

/*  
    // projection
    double fx = cameraMatrix.at<double>(0, 0);
    double fy = cameraMatrix.at<double>(1, 1);
    double cx = cameraMatrix.at<double>(1, 2);
    double cy = cameraMatrix.at<double>(1, 0);
    double W  = (double)frame->cols;
    double H  = (double)frame->rows;
    double near = .1;
    double far = 100.0;

    osg::Matrixd projectionMatrix;
    projectionMatrix.set(
        2 * fx / W, 0, 0, 0, 
        0, 2 * fy / H, 0, 0, 
        2 * (cx / W) - 1, 2 * (cy - H) - 1, (far + near) / (far - near), 1,
        0, 0, 2 * far * near / (near - far), 0);
        
    projectionMatrix.set(
        2 * fx / W, 0, 0, 0, 
        0, 2 * fy / H, 0, 0,
        2 * (cx / W) - 1, 2 * (cy / H) - 1, (far + near) / (far - near), 1,
        0, 0, 2 * far * near / (near - far), 0);

    viewer.getCamera()->setProjectionMatrix(projectionMatrix);*/


    do
    {       
        patternfound = false;
        resetAuto = false;
        detectionMire = false;
            
        imagePoints.clear();
        chessCornersInit[0].clear();
        chessCornersInit[1].clear();
        moyDistances = 0;
        distances.clear();
        imCalibNext.release();
        
        group->removeChild(pat);
        std::cout << "recherche de mire" << std::endl;

        do
        {
            vcap >> *frame;
            backgroundImage->dirty();
            detectionMire = detecterMire(frame, &chessCornersInit[1], &imCalibNext);
            viewer.frame();
        }while(!detectionMire && !viewer.done());

        if(viewer.done())
            break;

        std::cout << "mire detectee" << std::endl << std::endl;
        group->addChild(pat);

        do
        {           
            vcap >> *frame;
            
            cv::Mat rotVec = trackingMire(frame, &imCalibNext, &chessCornersInit, &chessCorners3D, &cameraMatrix, &distCoeffs, &tvecs);

            imagePoints = dessinerPoints(frame, objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs);
            
            cv::transpose(rotVec, Rc);
            cv::invert(rotVec, rotVecInv);

            for(int i = 0; i < 3; i++)
                C.at<double>(i, 0) = -1 * (
                rotVecInv.at<double>(i, 0) * tvecs.at<double>(0, 0) +
                rotVecInv.at<double>(i, 1) * tvecs.at<double>(1, 0) +
                rotVecInv.at<double>(i, 2) * tvecs.at<double>(2, 0));
            
            osg::Matrixd viewMatrixR, viewMatrixT, viewMatrix90;

            viewMatrixT.makeTranslate(
                -C.at<double>(0, 0) / 100,
                C.at<double>(1, 0) / 100,
                C.at<double>(2, 0) / 100);
            
            double r11 = rotVec.at<double>(0, 0);
            double r21 = rotVec.at<double>(1, 0);
            double r31 = rotVec.at<double>(2, 0);
            double r32 = rotVec.at<double>(2, 1);
            double r33 = rotVec.at<double>(2, 2);

            viewMatrixR.makeRotate(
                atan2(r32, r33), osg::Vec3d(1.0, 0.0, 0.0),
                -atan2(-r31, sqrt((r32 * r32) + (r33 * r33))), osg::Vec3d(0.0, 1.0, 0.0),
                -atan2(r21, r11), osg::Vec3d(0.0, 0.0, 1.0));
            

            viewMatrix90.makeRotate(osg::DegreesToRadians(-90.0), osg::Vec3d(1.0, 0.0, 0.0));
            
            viewer.getCamera()->setViewMatrix(viewMatrixT * viewMatrixR);
            
            // Compute the reprojection error
            double moy = 0;
            for(int j = 0; j < COLCHESSBOARD * ROWCHESSBOARD; j++)
            {
                double d = sqrt(pow(chessCornersInit[0][j].y - imagePoints[j].y, 2) + pow(chessCornersInit[0][j].x - imagePoints[j].x, 2));
                distances.push_back(d);
                moy += d;
            }

            moyDistances = moy / (COLCHESSBOARD * ROWCHESSBOARD);

            if(moyDistances > 2) // if the reprojection error is too large, reset
                resetAuto = true;

            key = cv::waitKey(33);

            backgroundImage->dirty();
            viewer.frame();
        }while(!viewer.done() && !resetAuto && key != 32);

    }while(!viewer.done());
}
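
The element-wise loop above computes the camera position C = -R^-1 * t. Since rotVec is a rotation matrix, the transpose Rc computed just before already equals the inverse, so the loop collapses to a single cv::Mat expression (sketch):

C = -(Rc * tvecs); // Rc = R^T = R^-1 for a rotation matrix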
Example No. 6
string optical_flow::get_final_direction(Mat m1, Mat m2,
		Rect old_hand_boundary, Rect new_hand_boundary) {

	left_count = 0;
	up_count = 0;
	down_count = 0;
	right_count = 0;
	non_count = 0;

	TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);

	cvtColor(m1, old_gray_img, COLOR_BGR2GRAY);//convert to gray
	cvtColor(m2, new_gray_img, COLOR_BGR2GRAY);

	// extract features
	goodFeaturesToTrack(old_gray_img, old_frame_points, max_count, .01,
			min_distance, Mat(), block_size, 0, .04);
	cornerSubPix(old_gray_img, old_frame_points, Size(10, 10), Size(-1, -1),
			termcrit);

	// track features in next frame
	vector<uchar> status;
	vector<float> err;
	calcOpticalFlowPyrLK(old_gray_img, new_gray_img, old_frame_points,
			new_frame_points, status, err, Size(10, 10), 3, termcrit, 0, 0.001);

	for (unsigned int i = 0; i < new_frame_points.size(); i++) {
		Point2f old_point = old_frame_points.at(i);
		Point2f new_point = new_frame_points.at(i);
		if (!(old_hand_boundary.contains(old_point)
				&& new_hand_boundary.contains(new_point)))
			continue;
		float dist = get_distance(old_point, new_point);
		//		cout<<dist<<endl;
		if (dist < threshold) {
			directions.push_back(non_direction);
			non_count++;
		} else {
			float dx = new_point.x - old_point.x;
			float dy = new_point.y - old_point.y;
			if (fabs(dx) > fabs(dy)) { // horizontal (fabs: integer abs() could truncate the floats)
				if (fabs(dx) <= thresh_in_on_dir)
					non_count++;
				else {
					if (dx < 0) {
						directions.push_back(left_direction);
						left_count++;
					} else {
						directions.push_back(right_direction);
						right_count++;
					}
				}
			} else { // vertical
				if (fabs(dy) <= thresh_in_on_dir)
					non_count++;
				else {
					if (dy < 0) {
						directions.push_back(up_direction);
						up_count++;
					} else {
						directions.push_back(down_direction);
						down_count++;
					}
				}
			}
		}
	}

	int dirs_counts[] = { up_count, down_count, left_count, right_count,
			non_count };
	int max_elem = *max_element(dirs_counts, dirs_counts + 5);
	//	cout<<up_count << " "<<down_count<<" "<<left_count<<" " <<right_count<<" "<<non_count<<endl;
	final_direction = "";
	if (up_count == max_elem)
		final_direction = "up";
	else if (down_count == max_elem)
		final_direction = "down";
	else if (left_count == max_elem)
		final_direction = "left";
	else if (right_count == max_elem)
		final_direction = "right";
	else
		final_direction = "none";

	return final_direction;
}
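
get_distance is not shown in this example; a plausible definition (an assumption, matching how it is used above) is plain Euclidean distance:

float get_distance(Point2f a, Point2f b) {
	float dx = a.x - b.x, dy = a.y - b.y;
	return std::sqrt(dx * dx + dy * dy);
}

Note also that when several counts tie for the maximum, the if/else-if chain resolves the tie in the fixed order up, down, left, right, none.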
Example No. 7
// Lucas-Kanade optical flow
void QOpenCvImageBox::lucasKanadeOF( IplImage* img ) {
    Point2f pt;
    TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS,20,0.03);  // ERROR

    bool addRemovePt = false;

    Size winSize(10,10);

    cvtColor(image, gray, CV_BGR2GRAY);

    if( nightMode )
        image = Scalar::all(0);

    if( needToInit )
    {
        // automatic initialization
        goodFeaturesToTrack(gray, points[1], MAX_COUNT, 0.01, 10, Mat(), 3, 0, 0.04);
        cornerSubPix(gray, points[1], winSize, Size(-1,-1), termcrit);
        addRemovePt = false;
    }
    else if( !points[0].empty() )
    {
        vector<uchar> status;
        vector<float> err;
        if(prevGray.empty())
            gray.copyTo(prevGray);
        calcOpticalFlowPyrLK(prevGray, gray, points[0], points[1], status, err, winSize, 3, termcrit, 0);
        size_t i, k;
        for( i = k = 0; i < points[1].size(); i++ )
        {
            if( addRemovePt )
            {
                if( norm(pt - points[1][i]) <= 5 )
                {
                    addRemovePt = false;
                    continue;
                }
            }

            if( !status[i] )
                continue;

            points[1][k++] = points[1][i];
            circle( image, points[1][i], 3, Scalar(0,255,0), -1, 8);
        }
        points[1].resize(k);
    }

    if( addRemovePt && points[1].size() < (size_t)MAX_COUNT )
    {
        vector<Point2f> tmp;
        tmp.push_back(pt);
        cornerSubPix( gray, tmp, winSize, Size(-1,-1), termcrit);
        points[1].push_back(tmp[0]);
        addRemovePt = false;
    }

    needToInit = false;
    /*
    imshow("LK Demo", image);

    char c = (char)waitKey(10);
    if( c == 27 )
        break;
    switch( c )
    {
    case 'r':
        needToInit = true;
        break;
    case 'c':
        points[1].clear();
        break;
    case 'n':
        nightMode = !nightMode;
        break;
    default:
        ;
    }
    */

    std::swap(points[1], points[0]);
    swap(prevGray, gray);
}
Example No. 8
int Counter::startCount(std::string file, char frontRear/* = 'F'*/)
{
    cv::VideoCapture cap(file.c_str());
    if (!cap.isOpened()) {
        std::cout << "Could not open file" << std::endl;
        return 1;
    }
    fps = 1000/cap.get(CV_CAP_PROP_FPS);
    //int frate = 1000/fps;
    int frate = 20;
    int dumy = 13700;  // @debug  13700  15840   18246   18890   21900

    // Location recognition
    DigitRecognizer dr(1,10,5,7, "./origImages");
    dr.learnFromImages();
    dr.setClassifier();

    // set parameters
    if ('F'==frontRear) {
        setFrontDoor();
    } else {
        setRearDoor();
    }

    std::vector<cv::Point2f> tripWire;                  // points on the tripwire
    std::list<std::vector<cv::Point2f> > trajectories;  // a list of trajectories being tracked
    std::vector<std::list<int> > on_models;             // each model is a list of start times
    std::vector<std::list<int> > off_models;
    float mean_x=0.0f, mean_y=0.0f, var_x=0.0f, var_y=0.0f, length=0.0f;    // trajectory stats

    cv::Mat capframe, frame, image, gray, prevGray, location;
    cv::Mat doorHistBG, door, doorHist;
    cv::Size winSize(31,31);            // window size for optical flow computation
    cv::TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03);
    int onPassengers = 0;
    int offPassengers = 0;
    int missPassengers = 0;
    int histSize = 2;                   // size of background histogram
    float range[] = { 0, 256 };         // range of pixel values for histogram calculation
    const float* histRange = { range }; //
    std::string prevGPS, currGPS, speed;

    generateTrackingPoints(tripWire);

    while (true) {
        int fno = cap.get(CV_CAP_PROP_POS_FRAMES);
        if (fno>=dumy) {
            std::cout << "";
        }

        cap >> capframe;
        if (capframe.empty()) break;

        frame = capframe(cv::Rect(0,0,580,450));
        //cv::warpPerspective(frame, frame, M, frame.size() );
        frame.copyTo(image);
        cv::cvtColor(image, gray, CV_BGR2GRAY);
        //gammaCorrection(gray);  // note: it becomes worse with Gamma (anti-) correction

        if (prevGray.empty()) {
            gray.copyTo(prevGray);
        }

        // check gps location
        location = capframe(cv::Rect(810, 90, 90, 30));
        currGPS = dr.analyseLocationImage(location, false);
        /*int gpsDistance = 0;
        if (!prevGPS.empty()) {
            std::inner_product(prevGPS.begin(), prevGPS.end(), currGPS.begin(), gpsDistance,
                               std::plus<int>(), std::not_equal_to<char>());
        }
        // add points to trajectories /// NEED TO KNOW THAT GPS DOESN'T CHANGE FOR SEVERAL FRAMES
        if(trajectories.size()<tripWire.size()-10 && gpsDistance<3) { //160 0.8
            addPoints(trajectories, tripWire, fno);
        }*/

        // check if door is closed
        door = gray(rectDoor);
        if (fno<5) {
            cv::Mat tmpDoorHistBG;
            //cv::calcHist(&door, 1, 0, cv::Mat(), tmpDoorHistBG, 1, &histSize, &histRange, true, false);
            tmpDoorHistBG = Utility::HogComp(door);
            cv::normalize(tmpDoorHistBG, tmpDoorHistBG, 1, 0, cv::NORM_L2, -1, cv::Mat());
            if (doorHistBG.empty()) {
                doorHistBG = tmpDoorHistBG;
            } else {
                cv::addWeighted(doorHistBG, 0.7, tmpDoorHistBG, 0.3, 0, doorHistBG, -1);
            }
        }
        //cv::calcHist(&door, 1, 0, cv::Mat(), doorHist, 1, &histSize, &histRange, true, false);
        doorHist = Utility::HogComp(door);
        cv::normalize(doorHist, doorHist, 1, 0, cv::NORM_L2, -1, cv::Mat());
        //float similarityDoor = doorHistBG.dot(doorHist);
        float similarityDoor = cv::compareHist(doorHistBG, doorHist, CV_COMP_CORREL);
        bool bDoorOpen = similarityDoor<0.9;

        // add points to trajectories
        if(trajectories.size()<tripWire.size()-10 && bDoorOpen) { //160 0.8
            addPoints(trajectories, tripWire, fno);
        }

        std::vector<uchar> status;
        std::vector<float> err;
        std::vector<cv::Point2f> nextPoints;
        std::vector<cv::Point2f> prevPoints = lastPoints(trajectories);
        if (prevPoints.empty()==false) {
            cv::calcOpticalFlowPyrLK(prevGray, gray, prevPoints, nextPoints, status, err, winSize, 3, termcrit, 0, 0.001);
        }

        int i=0;
        std::list<std::vector<cv::Point2f> >::iterator iTrack = trajectories.begin();
        for (; iTrack!=trajectories.end(); i++) {
            int szTrack = iTrack->size();
            isValidTrack(*iTrack, mean_x, mean_y, var_x, var_y, length);

            if ((szTrack>3) && (var_x<1.0f) && (var_y<1.0f)) { // stationary points
                iTrack = trajectories.erase(iTrack);
            } else if ((!status[i] || err[i]>13.0) && (szTrack>10)) { // loss of tracking
                iTrack->at(0).y = 1.0;
                iTrack++;
            } else if (szTrack>80) { // too long, remove  120
                iTrack = trajectories.erase(iTrack);
            } else if (szTrack>30) { // long trajectory, try to check 80
                iTrack->at(0).y = 2.0;
                iTrack->push_back(nextPoints[i]);
                iTrack++;
            } else {
                iTrack->push_back(nextPoints[i]);
                iTrack++;
            }
        }

        // update models according to the direction of trajectories
        std::vector<int> startTimes;
        getStartTimes(trajectories, startTimes, fno);
        std::vector<int>::iterator iTime = startTimes.begin();
        for (; iTime!=startTimes.end(); iTime++) {
            int overall_direction = getMajorityDirection(trajectories, *iTime);
            for (i=0, iTrack=trajectories.begin(); iTrack!=trajectories.end(); i++) {
                drawtrajectory(*iTrack, image);
                if (((int)(iTrack->at(0).x) == *iTime) && (iTrack->at(0).y>0.0f)) { // only use trajectories long enough
                    bool validTrack = isValidTrack(*iTrack, mean_x, mean_y, var_x, var_y, length);
                    int onoff = onOroff(*iTrack);
                    if (validTrack && (onoff==overall_direction)) {
                        switch(onoff) {
                        case 0: {offPassengers = updateModel(off_models, *iTrack, onoff);
                            /*std::vector<cv::Point2f>::iterator iit = iTrack->begin();
                            while (iit!=iTrack->end()) {
                                std::cout << iit->x << " " << iit->y << " ";
                                ++iit;
                            }
                            std::cout << std::endl;*/
                            iTrack = trajectories.erase(iTrack);
                            continue;}
                        case 1: {onPassengers = updateModel(on_models, *iTrack, onoff);
                            iTrack = trajectories.erase(iTrack);
                            continue;}
                        case 2: {missPassengers++;
                            iTrack = trajectories.erase(iTrack);
                            continue;}
                        default: std::cout << "Error: Wrong branch!" << std::endl;
                        }
                    }
                    if ((int)(iTrack->at(0).y) == 1) { // lost tracking
                        iTrack = trajectories.erase(iTrack);
                        continue; // erase() already advanced the iterator; skip the ++ below
                    }
                }

                iTrack++;
            }
        }

        //cv::rectangle(image, rectDoor, cv::Scalar(0,255,0));
        showResultImage(image, onPassengers, offPassengers, currGPS, speed);

        if ((char)cv::waitKey(frate/speedratio)==27) break;
        cv::swap(prevGray, gray);
        std::swap(currGPS, prevGPS);
    }

    return 0;
}
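
The trajectory loops above rely on the list-erase idiom: erase() returns the iterator to the next element, so the increment must be skipped on that path (hence the continue statements). The bare idiom, as a sketch with a hypothetical shouldRemove predicate:

for (std::list<std::vector<cv::Point2f> >::iterator it = trajectories.begin();
     it != trajectories.end(); /* no increment here */) {
    if (shouldRemove(*it))           // hypothetical predicate
        it = trajectories.erase(it); // erase() yields the following element
    else
        ++it;
}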
Example No. 9
int main()
{
	bool patternfound = false;
    bool reset = false;
    bool resetAuto = false;

    cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
    cv::Size winSize(31, 31);
    
    cv::Mat cameraMatrix, distCoeffs;
    cv::Mat imCalib;
    cv::Mat imCalibColor;
    cv::Mat imCalibNext;
    cv::Mat rvecs, tvecs;
    
    std::vector<cv::Point2f> imagePoints;
    std::vector<cv::Point3f> objectPoints;
    std::vector<cv::Point3f> cubeObjectPoints;
    std::vector<std::vector<cv::Point2f>> chessCornersInit(2);
    std::vector<cv::Point3f> chessCorners3D;
    std::vector<double> distances;
    double moyDistances;
    

    // Create the points to project
    for(int x = 0; x < COLCHESSBOARD; x++)
        for(int y = 0; y < ROWCHESSBOARD; y++)
            objectPoints.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f));

	// Create the calibration-pattern corners
    for(int x = 0; x < COLCHESSBOARD; x++)
        for(int y = 0; y < ROWCHESSBOARD; y++)
            chessCorners3D.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f));  

    cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ);

    fs["cameraMatrix"] >> cameraMatrix;
    fs["distCoeffs"] >> distCoeffs;

    fs.release();

    cv::VideoCapture vcap(0); 
    if(!vcap.isOpened()){
        std::cout << "FAIL!" << std::endl;
        return -1;
    }

    cv::Mat *frame = new cv::Mat(cv::Mat::zeros(vcap.get(CV_CAP_PROP_FRAME_HEIGHT), vcap.get(CV_CAP_PROP_FRAME_WIDTH), CV_8UC3));

	do
    {
        vcap >> *frame;
    }while(frame->empty());

	osg::ref_ptr<osg::Image> backgroundImage = new osg::Image;
    backgroundImage->setImage(frame->cols, frame->rows, 3,
        GL_RGB, GL_BGR, GL_UNSIGNED_BYTE,
        (uchar*)(frame->data),
        osg::Image::AllocationMode::NO_DELETE, 1);

    // read the scene from the list of file specified commandline args.
    osg::ref_ptr<osg::Group> group = new osg::Group;
    osg::ref_ptr<osg::Node> objet3D;

    objet3D = osgDB::readNodeFile("dumptruck.osgt");
    osg::ref_ptr<osg::Camera> cam = createHUD(backgroundImage);

	osgViewer::Viewer viewer; 

    group->addChild(cam);
    group->addChild(objet3D);

    // set the scene to render
    viewer.setSceneData(group.get());

	// projection
	viewer.getCamera()->setProjectionMatrixAsPerspective( 40., 1., 1., 100. ); 

	// Create a matrix to specify a distance from the viewpoint. 
	osg::Matrix trans; 
	trans.makeTranslate( 7, 0., -50. ); 
	// Rotation angle (in radians) 
	double angle( 0. ); 

	char key = 0;
    bool detectionMire = false;

	do
	{
		patternfound = false;
        resetAuto = false;
        detectionMire = false;
            
        imagePoints.clear();
        chessCornersInit[0].clear();
        chessCornersInit[1].clear();
        moyDistances = 0;
        distances.clear();
        imCalibNext.release();
        
        group->removeChild(objet3D);
        std::cout << "recherche de mire" << std::endl;

        do
        {
            vcap >> *frame;
            backgroundImage->dirty();
            detectionMire = detecterMire(frame, &chessCornersInit[1], &imCalibNext);
            viewer.frame();
        }while(!detectionMire && !viewer.done());

        if(viewer.done())
            break;

        std::cout << "mire detectee" << std::endl << std::endl;
        group->addChild(objet3D);

		do
		{
			vcap >> *frame;

			cv::Mat rotVec = trackingMire(frame, &imCalibNext, &chessCornersInit, &chessCorners3D, &cameraMatrix, &distCoeffs, &tvecs);

            imagePoints = dessinerPoints(frame, objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs);
				
			// Create the rotation matrix. 
			osg::Matrix rot; 
			rot.makeRotate( angle, osg::Vec3( 1., 0., 0. ) ); 
			angle += 0.01; 
			// Set the view matrix (the concatenation of the rotation and 
			//   translation matrices). 
			viewer.getCamera()->setViewMatrix( rot * trans ); 

			double moy = 0;
			for(int j = 0; j < COLCHESSBOARD * ROWCHESSBOARD; j++)
			{
			     double d = sqrt(pow(chessCornersInit[0][j].y - imagePoints[j].y, 2) + pow(chessCornersInit[0][j].x - imagePoints[j].x, 2));
			     distances.push_back(d);
			     moy += d;
			}

			moyDistances = moy / (COLCHESSBOARD * ROWCHESSBOARD);

			if(moyDistances > 2) // if the reprojection error is too large, reset
			     resetAuto = true;

			key = cv::waitKey(33);

			// Draw the next frame. 
			backgroundImage->dirty();
			viewer.frame(); 

		}while(!viewer.done() && !resetAuto && key != 32);
	} while(!viewer.done());
}
Example No. 10
int main()
{
	time_t timer = 0;
	time_t start = clock();
	time_t startImage = 0;
	std::cout << "Debut projection\t" << std::endl;

	bool patternfound = false;
	int i = 0;

	cv::TermCriteria termcrit(CV_TERMCRIT_ITER|CV_TERMCRIT_EPS, 20, 0.03);
	cv::Size winSize(31,31);
	
	cv::Mat cameraMatrix, distCoeffs;
	cv::Mat imCalib;
	cv::Mat imCalibColor;
	cv::Mat imCalibNext;
	cv::Mat rvecs, tvecs;
	std::vector<cv::Point2f> imagePoints;
	std::vector<cv::Point3f> objectPoints;
	std::vector<cv::Point2f> chessCornersInit[2];
	std::vector<cv::Point3f> chessCorners3D;

	// Create the points to project
	objectPoints.push_back(cv::Point3f(50,25,0));
	objectPoints.push_back(cv::Point3f(150,25,0));
	objectPoints.push_back(cv::Point3f(150,125,0));
	objectPoints.push_back(cv::Point3f(50,125,0));
	objectPoints.push_back(cv::Point3f(50,25,100));
	objectPoints.push_back(cv::Point3f(150,25,100));
	objectPoints.push_back(cv::Point3f(150,125,100));
	objectPoints.push_back(cv::Point3f(50,125,100));

	// Create the calibration-pattern corners
	for(int x=0 ; x<COLCHESSBOARD ; x++)
		for(int y=0 ; y<ROWCHESSBOARD ; y++)
			chessCorners3D.push_back(cv::Point3f(x*26.0f,y*26.0f,0.0f));	

	cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ);

	fs["cameraMatrix"] >> cameraMatrix;
	fs["distCoeffs"] >> distCoeffs;

	fs.release();

	cv::VideoCapture vcap("../rsc/capture.avi"); 
	if(!vcap.isOpened()){
		  std::cout << "FAIL!" << std::endl;
		  return -1;
	}

	do{
		vcap >> imCalibColor;
		cv::imshow("Projection", imCalibColor);
		cv::cvtColor(imCalibColor, imCalib, CV_BGR2GRAY);
		cv::waitKey();
		timer = clock();
		startImage = clock();
	
		patternfound = cv::findChessboardCorners(imCalib, cv::Size(ROWCHESSBOARD, COLCHESSBOARD), chessCornersInit[0], cv::CALIB_CB_FAST_CHECK);
		
		std::cout << "findChessboardCorners\t" << float(clock()-timer)/CLOCKS_PER_SEC << " sec" << std::endl;
		timer = clock(); 
	} while(!patternfound);

	for(;;)
	{		
		vcap >> imCalibColor;		
						
		if(!imCalibNext.empty())
		{
			cv::swap(imCalib, imCalibNext); // keep the previous image for the optical flow
			for(size_t c = 0; c < chessCornersInit[0].size(); c++)
				chessCornersInit[0][c] = chessCornersInit[1][c];
			chessCornersInit[1].clear();
		}
		else
			cv::cornerSubPix(imCalib, chessCornersInit[0], cv::Size(5, 5), cv::Size(-1, -1), cv::TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));

		cv::cvtColor(imCalibColor, imCalibNext, CV_BGR2GRAY);

		std::vector<uchar> status;
		std::vector<float> err;
		cv::calcOpticalFlowPyrLK(imCalib, imCalibNext, chessCornersInit[0], chessCornersInit[1], status, err, winSize, 3, termcrit, 0, 0.0001);

		cv::solvePnP(chessCorners3D, chessCornersInit[0], cameraMatrix, distCoeffs, rvecs, tvecs);

		cv::Mat rotVec(3, 3, CV_64F);
		cv::Rodrigues(rvecs, rotVec);

		//Projection
		cv::projectPoints(objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs, imagePoints);

		// Draw the projected points
		cv::line(imCalibColor, imagePoints[0], imagePoints[4], cv::Scalar(255,255,0), 2, 8);
		cv::line(imCalibColor, imagePoints[1], imagePoints[5], cv::Scalar(255,255,0), 2, 8);
		cv::line(imCalibColor, imagePoints[2], imagePoints[6], cv::Scalar(255,255,0), 2, 8);
		cv::line(imCalibColor, imagePoints[3], imagePoints[7], cv::Scalar(255,255,0), 2, 8);

		cv::line(imCalibColor, imagePoints[0], imagePoints[1], cv::Scalar(255,0,255), 2, 8);
		cv::line(imCalibColor, imagePoints[1], imagePoints[2], cv::Scalar(255,0,255), 2, 8);
		cv::line(imCalibColor, imagePoints[2], imagePoints[3], cv::Scalar(255,0,255), 2, 8);
		cv::line(imCalibColor, imagePoints[3], imagePoints[0], cv::Scalar(255,0,255), 2, 8);

		cv::line(imCalibColor, imagePoints[4], imagePoints[5], cv::Scalar(0,255,255), 2, 8);
		cv::line(imCalibColor, imagePoints[5], imagePoints[6], cv::Scalar(0,255,255), 2, 8);
		cv::line(imCalibColor, imagePoints[6], imagePoints[7], cv::Scalar(0,255,255), 2, 8);
		cv::line(imCalibColor, imagePoints[7], imagePoints[4], cv::Scalar(0,255,255), 2, 8);

		cv::imshow("Projection", imCalibColor);

		cv::waitKey(67);
	}

	return 0;
}
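
The twelve cv::line calls above draw the cube wireframe edge by edge. A table-driven sketch of the same drawing (vertex indices follow the objectPoints order defined above; the original gives each edge group its own colour, which a parallel Scalar table would restore):

static const int edges[12][2] = {
	{0,4},{1,5},{2,6},{3,7},   // vertical edges
	{0,1},{1,2},{2,3},{3,0},   // bottom face
	{4,5},{5,6},{6,7},{7,4} }; // top face
for (int e = 0; e < 12; e++)
	cv::line(imCalibColor, imagePoints[edges[e][0]], imagePoints[edges[e][1]],
	         cv::Scalar(255, 255, 0), 2, 8);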
Example No. 11
// ----------------------------------------------------------------------------------
void QualityMatcher::doTheMagic(cv::Mat imageSrc, cv::Mat imageDst, cv::Mat priorH, MatchingResultCallback cb)
{
  // keypoints 
  std::vector<cv::KeyPoint> featuresSrc;
  std::vector<cv::KeyPoint> featuresDst;
  
  // TODO - use the provided prior
  
  // prefilter slightly
  cv::Mat imgSrc = imageSrc, imgDst = imageDst;
  //cv::GaussianBlur(imageSrc, imgSrc, cv::Size(3,3), 5.0);
  //cv::GaussianBlur(imageDst, imgDst, cv::Size(3,3), 5.0);

  //cv::medianBlur(imageSrc, imgSrc, 3);
  //cv::medianBlur(imageDst, imgDst, 3);
  
  cv::Mat descriptorsSrc, descriptorsDst;
  
  // detect
  
  //cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("SURF");
  //detector->detect(imgSrc, featuresSrc);
  //detector->detect(imgDst, featuresDst);
  
  // features
  cv::FAST(imgSrc, featuresSrc, 50, cv::FastFeatureDetector::TYPE_9_16);
  cv::FAST(imgDst, featuresDst, 50, cv::FastFeatureDetector::TYPE_9_16);
    
  printf("input %d vs %d\n", (int)featuresSrc.size(), (int)featuresDst.size());
  cv::Ptr<cv::DescriptorExtractor> descriptor = cv::DescriptorExtractor::create("ORB" );
  descriptor->compute(imgSrc, featuresSrc, descriptorsSrc);
  descriptor->compute(imgDst, featuresDst, descriptorsDst);
  
  // descriptors
  //cv::BriefDescriptorExtractor descriptor;
  //descriptor.compute(imgSrc, featuresSrc, descriptorsSrc);  
  //descriptor.compute(imgDst, featuresDst, descriptorsDst);
  
  if (featuresDst.size() < 10 || featuresSrc.size() < 10
      || descriptorsSrc.rows != (int)featuresSrc.size()
      || descriptorsDst.rows != (int)featuresDst.size())
  {
    cb(false, priorH);
    return;
  }
  
  // matching (simple nearest neighbours)
  cv::BFMatcher matcher(cv::NORM_HAMMING);
  std::vector<cv::DMatch> matches;
  matcher.match( descriptorsSrc, descriptorsDst, matches );
  
  
  std::vector<cv::DMatch> goodMatches;
  std::vector<cv::Point2f> ptsSrc, ptsDst;
  for( int i = 0; i < matches.size(); i++ )
  {
    if( matches[i].distance <= 20)//std::max(4. * min_dist, 0.02) )
    {
      goodMatches.push_back(matches[i]);
    }
  }
  
  for( int i = 0; i < goodMatches.size(); i++ )
  {
    ptsSrc.push_back( featuresSrc[ goodMatches[i].queryIdx ].pt );
    ptsDst.push_back( featuresDst[ goodMatches[i].trainIdx ].pt );
  }
  
  if (goodMatches.size() < 10)
  {
    printf("MATCH FAILED\n");
    cb(false, priorH);
    return;
  }
  
  /*cv::namedWindow( "Display window", cv::WINDOW_AUTOSIZE );
  cv::Mat img;
  cv::drawMatches(imgSrc, featuresSrc, imgDst, featuresDst, goodMatches, img);
  cv::imshow("imgae1", img);
  
  //cv::imshow("imgae1", imgSrc);
  //cv::imshow("imgae2", imgDst);
  cv::waitKey(0); */
  
  // ----------------------------
  // KLT tracker to further improve the result
  // ----------------------------
  cv::TermCriteria termcrit(cv::TermCriteria::COUNT | cv::TermCriteria::EPS, 30, 0.03);
  cv::cornerSubPix(imgSrc, ptsSrc, cv::Size(3,3), cv::Size(-1,-1), termcrit);
  cv::cornerSubPix(imgDst, ptsDst, cv::Size(3,3), cv::Size(-1,-1), termcrit);
  if(1)
  {
    std::vector<uchar> status;
    std::vector<float> err;
    cv::Size winSize(7,7);
    
    std::vector<cv::Point2f> ptsDstKlt = ptsDst;
    std::vector<cv::Point2f> ptsSrcOld = ptsSrc;
    
    std::vector<cv::Mat> pyrSrc, pyrDst;
    cv::buildOpticalFlowPyramid(imgSrc, pyrSrc, winSize, 4);
    cv::buildOpticalFlowPyramid(imgDst, pyrDst, winSize, 4);
    
    
    /*cv::namedWindow( "Display window", cv::WINDOW_AUTOSIZE );
    cv::Mat img;
    cv::drawMatches(imgSrc, featuresSrc, imgDst, featuresDst, goodMatches, img);
    cv::imshow("imgae1", img);
    cv::waitKey(0);*/
    
    cv::calcOpticalFlowPyrLK(pyrSrc, pyrDst, ptsSrc, ptsDstKlt, status, err, winSize, 4, termcrit, cv::OPTFLOW_USE_INITIAL_FLOW);
    
    // remove bad points
    ptsSrc.clear();
    ptsDst.clear();
    for (size_t i=0; i < status.size(); i++)
    {
      if (!status[i]) continue;
      
      ptsSrc.push_back(ptsSrcOld[i]);
      ptsDst.push_back(ptsDstKlt[i]);
    }
  }
  printf("klt tracked %d\n", (int)ptsDst.size());
  if (ptsDst.size() < 10)
  {
    printf("MATCH FAILED\n");
    cb(false, priorH);
    return;
  }
  
  cv::Mat H = cv::findHomography(ptsSrc, ptsDst, CV_RANSAC, 10.);
  H.convertTo(H, CV_32FC1);
  
  if (!niceHomography(H))
  {
      printf("MATCH FAILED\n");
      cb(false, priorH);
      return;    
  }
  
  // DEBUG  
  printf("H:\n");
  for (int i=0; i < 3; i++)
    printf("%f %f %f\n", H.at<float>(i,0), H.at<float>(i,1), H.at<float>(i,2));
  printf("prior H:\n");
  for (int i=0; i < 3; i++)
    printf("%f %f %f\n", priorH.at<float>(i,0), priorH.at<float>(i,1), priorH.at<float>(i,2));
  
  float nrm = cv::norm(priorH);
  if (nrm > 2)
  {
    nrm = cv::norm(priorH, H);
    printf("(H-prior).norm() = %f\n", nrm);
    if (nrm > 10.0)
    {
      printf("MATCH FAILED - bad H\n");
      cb(false, priorH);
      return;    
    }
  }
  
  cb(true, H);
  
  
  
  printf("matched %d features\n", (int)featuresSrc.size());
}
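
niceHomography is referenced above but not defined in this example. A common sanity check of this kind (an assumption, not necessarily this project's implementation) rejects homographies whose upper-left 2x2 block flips orientation or collapses area:

bool niceHomography(const cv::Mat& H) // expects the 3x3 CV_32FC1 matrix built above
{
  const float det = H.at<float>(0, 0) * H.at<float>(1, 1)
                  - H.at<float>(1, 0) * H.at<float>(0, 1);
  return det > 0.1f; // reject reflections and near-degenerate maps
}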
Example No. 12
int main()
{
    bool patternfound = false;
    bool reset = false;
    bool resetAuto = false;
    int nbImages = 0;
    double moyFinale = 0;
    char key = 0;
    bool detectionMire = false;
	bool detectionVisage = false;
	int cpt = 0, moyCpt = 0, i = 0;

	std::cout << "initialisation de Chehra..." << std::endl;
	Chehra chehra;
	std::cout << "done" << std::endl;

    cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
    cv::Size winSize(31, 31);
    
    cv::Mat cameraMatrix, distCoeffs;
    cv::Mat imCalib;
    cv::Mat imCalibColor;
    cv::Mat imCalibNext;
    cv::Mat rvecs, tvecs;
    cv::Mat Rc, C = cv::Mat(3, 1, CV_64F), rotVecInv;
    
    std::vector<cv::Point2f> imagePoints;
    std::vector<cv::Point3f> objectPoints;
    std::vector<cv::Point3f> cubeObjectPoints;
	std::vector<cv::Point3f> dessinPointsVisage;
    std::vector<std::vector<cv::Point2f>> chessCornersInit(2);
	std::vector<std::vector<cv::Point2f>> pointsVisageInit(2);
    std::vector<cv::Point3f> chessCorners3D;
	std::vector<cv::Point3f> pointsVisage3D;
	std::vector<cv::Point3f> visage;
    std::vector<double> distances;
    double moyDistances;

    // Create the calibration-pattern corners
    for(int x = 0; x < COLCHESSBOARD; x++)
        for(int y = 0; y < ROWCHESSBOARD; y++)
            chessCorners3D.push_back(cv::Point3f(x * SIZEMIRE, y * SIZEMIRE, 0.0f));  

    // Create the points to project
    for(int x = 0; x < COLCHESSBOARD; x++)
        for(int y = 0; y < ROWCHESSBOARD; y++)
            objectPoints.push_back(cv::Point3f(x * SIZEMIRE, y * SIZEMIRE, 0.0f));
	
	cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ);

	fs["cameraMatrix"] >> cameraMatrix;
	fs["distCoeffs"] >> distCoeffs;

	double f = (cameraMatrix.at<double>(0, 0) + cameraMatrix.at<double>(1, 1)) / 2; // NEAR = focal length; with square pixels fx = fy,
	// but fx generally differs from fy, so for now we default to their mean
	double g = 2000 * f; // not sure why; picked at random

	fs.release();

	cv::VideoCapture vcap(0); 
	if(!vcap.isOpened()){
		std::cout << "FAIL!" << std::endl;
		return -1;
	}

	cv::Mat *frame = new cv::Mat(cv::Mat::zeros(vcap.get(CV_CAP_PROP_FRAME_HEIGHT), vcap.get(CV_CAP_PROP_FRAME_WIDTH), CV_8UC3));

	do
	{
		vcap >> *frame;
	}while(frame->empty());

	osg::ref_ptr<osg::Image> backgroundImage = new osg::Image;
	backgroundImage->setImage(frame->cols, frame->rows, 3,
		GL_RGB, GL_BGR, GL_UNSIGNED_BYTE,
		(uchar*)(frame->data),
		osg::Image::AllocationMode::NO_DELETE, 1);

	// read the scene from the list of file specified commandline args.
	osg::ref_ptr<osg::Group> group = new osg::Group;
	osg::ref_ptr<osg::Geode> cam = createHUD(backgroundImage, vcap.get(CV_CAP_PROP_FRAME_WIDTH), vcap.get(CV_CAP_PROP_FRAME_HEIGHT), cameraMatrix.at<double>(0, 2), cameraMatrix.at<double>(1, 2), f);

	std::cout << "initialisation de l'objet 3D..." << std::endl;
	osg::ref_ptr<osg::Node> objet3D = osgDB::readNodeFile("../rsc/objets3D/Creature.obj");
	std::cout << "done" << std::endl;
   
	osg::StateSet* obectStateset = objet3D->getOrCreateStateSet();
	obectStateset->setMode(GL_DEPTH_TEST, osg::StateAttribute::OFF);
	osg::ref_ptr<osg::MatrixTransform> mat = new osg::MatrixTransform();
	osg::ref_ptr<osg::PositionAttitudeTransform> pat = new osg::PositionAttitudeTransform();

	// construct the viewer.
	osgViewer::CompositeViewer compositeViewer;
	osgViewer::View* viewer = new osgViewer::View;
	osgViewer::View* viewer2 = new osgViewer::View;

	// add the HUD subgraph.
	group->addChild(cam);

	mat->addChild(objet3D);
	pat->addChild(mat);
	group->addChild(pat);

    pat->setScale(osg::Vec3d(3, 3, 3));

	osg::Matrixd projectionMatrix;

	projectionMatrix.makeFrustum(
		-cameraMatrix.at<double>(0, 2),		vcap.get(CV_CAP_PROP_FRAME_WIDTH) - cameraMatrix.at<double>(0, 2),
		-cameraMatrix.at<double>(1, 2),		vcap.get(CV_CAP_PROP_FRAME_HEIGHT) - cameraMatrix.at<double>(1, 2),
		f,								g);

	osg::Vec3d eye(0.0f, 0.0f, 0.0f), target(0.0f, g, 0.0f), normal(0.0f, 0.0f, 1.0f);

	// set the scene to render
	viewer->setSceneData(group.get());
	viewer->setUpViewInWindow(0, 0, 1920 / 2, 1080 / 2); 
	viewer->getCamera()->setProjectionMatrix(projectionMatrix);
	viewer->getCamera()->setViewMatrixAsLookAt(eye, target, normal);

	viewer2->setSceneData(group.get());
	viewer2->setUpViewInWindow(1920 / 2, 0, 1920 / 2, 1080 / 2); 
	viewer2->getCamera()->setProjectionMatrix(projectionMatrix);
	osg::Vec3d eye2(4 * f, 3 * f / 2, 0.0f), target2(0.0f, f, 0.0f), normal2(0.0f, 0.0f, 1.0f);
	viewer2->getCamera()->setViewMatrixAsLookAt(eye2, target2, normal2);

	compositeViewer.addView(viewer);
	compositeViewer.addView(viewer2);

	compositeViewer.realize();  // set up windows and associated threads.



    do
    {       
		group->removeChild(pat);
        patternfound = false;
        resetAuto = false;
        detectionMire = false;
		detectionVisage = false;
            
        imagePoints.clear();
        chessCornersInit[0].clear();
        chessCornersInit[1].clear();
		pointsVisageInit[0].clear();
		pointsVisageInit[1].clear();
		pointsVisage3D.clear();
		dessinPointsVisage.clear();
		visage.clear();
        moyDistances = 0;
        distances.clear();
        imCalibNext.release();
        
        std::cout << "recherche de pattern" << std::endl;

		time_t start = clock();
		double timer = 0;
		
        do
        {
			start = clock();

            vcap >> *frame;

			backgroundImage->dirty();
            //detectionMire = detecterMire(frame, &chessCornersInit[1], &imCalibNext);
			detectionVisage = detecterVisage(frame, &chehra, &pointsVisageInit[1], &visage, &pointsVisage3D, &imCalibNext);

			cpt++;
			double duree = (clock() - start)/(double) CLOCKS_PER_SEC;
			timer += duree;

			if(timer >= 1){
				std::cout << cpt << " fps" << std::endl;
				moyCpt += cpt;
				timer = 0;
				duree = 0;
				i++;
				cpt = 0;
				start = clock();
			}

            compositeViewer.frame();
        }while(!detectionMire && !detectionVisage && !compositeViewer.done());

        if(compositeViewer.done())
            break;

        std::cout << "pattern detectee" << std::endl << std::endl;

		group->addChild(pat);
		
        do
        {           
			start = clock();

            vcap >> *frame;
            
			cv::Mat rotVec = trackingMire(frame, &imCalibNext, &pointsVisageInit, &pointsVisage3D, &cameraMatrix, &distCoeffs, &tvecs);
            //cv::Mat rotVec = trackingMire(frame, &imCalibNext, &chessCornersInit, &chessCorners3D, &cameraMatrix, &distCoeffs, &tvecs);

            //imagePoints = dessinerPoints(frame, objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs);
			imagePoints = dessinerPoints(frame, pointsVisage3D, rotVec, tvecs, cameraMatrix, distCoeffs);
            
            double r11 = rotVec.at<double>(0, 0);
            double r21 = rotVec.at<double>(1, 0);
            double r31 = rotVec.at<double>(2, 0);
            double r32 = rotVec.at<double>(2, 1);
            double r33 = rotVec.at<double>(2, 2);

			osg::Matrixd matrixR;
            matrixR.makeRotate(
                atan2(r32, r33), osg::Vec3d(1.0, 0.0, 0.0),
                -atan2(-r31, sqrt((r32 * r32) + (r33 * r33))), osg::Vec3d(0.0, 0.0, 1.0),
                atan2(r21, r11), osg::Vec3d(0.0, 1.0, 0.0));
            
            mat->setMatrix(matrixR);
			pat->setPosition(osg::Vec3d(tvecs.at<double>(0, 0), tvecs.at<double>(2, 0), -tvecs.at<double>(1, 0)));

			//std::cout << "x = " << tvecs.at<double>(0, 0) << " - y = " << tvecs.at<double>(1, 0) << " - z = " << tvecs.at<double>(2, 0) << std::endl;

            // Compute the reprojection error
            double moy = 0;
            for(int j = 0; j < pointsVisageInit[1].size() ; j++)
			{
				double d = sqrt(pow(pointsVisageInit[0][j].y - imagePoints[j].y, 2) + pow(pointsVisageInit[0][j].x - imagePoints[j].x, 2));
				distances.push_back(d);
				moy += d;
			}

            moyDistances = moy / pointsVisageInit[1].size();

            if(moyDistances > 1) // if the reprojection error is too large, reset
                resetAuto = true;

			double duree = (clock() - start) / (double)CLOCKS_PER_SEC;

			std::cout << (int)(1 / duree) << " fps" << std::endl;
			moyCpt += (int)(1 / duree);
			duree = 0;
			i++;

            backgroundImage->dirty();
            compositeViewer.frame();
        }while(!compositeViewer.done() && !resetAuto);
		
    }while(!compositeViewer.done());

	std::cout << std::endl << "Moyenne des fps : " << moyCpt/i << std::endl;

	std::system("PAUSE");
}
Example No. 13
int main()
{
	time_t timer = 0;
	time_t start = clock();
	time_t startImage = 0;
	std::cout << "Debut projection\t" << std::endl;

	bool patternfound = false;
	bool reset = false;
	int i = 0;

	cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
	cv::Size winSize(31, 31);
	
	cv::Mat cameraMatrix, distCoeffs;
	cv::Mat imCalib;
	cv::Mat imCalibColor;
	cv::Mat imCalibNext;
	cv::Mat rvecs, tvecs;
	std::vector<cv::Point2f> imagePoints;
	std::vector<cv::Point3f> objectPoints;
	std::vector<cv::Point3f> cubeObjectPoints;
	std::vector<std::vector<cv::Point2f>> chessCornersInit(2);
	std::vector<cv::Point3f> chessCorners3D;
	

	// Creation des points a projeter
	for(int x = 0; x < COLCHESSBOARD; x++)
		for(int y = 0; y < ROWCHESSBOARD; y++)
			objectPoints.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f));

	// Creation des points a projeter
	cubeObjectPoints.push_back(cv::Point3f(52, 26, 0));
	cubeObjectPoints.push_back(cv::Point3f(156, 26, 0));
	cubeObjectPoints.push_back(cv::Point3f(156, 128, 0));
	cubeObjectPoints.push_back(cv::Point3f(52, 128, 0));
	cubeObjectPoints.push_back(cv::Point3f(52, 26, 104));
	cubeObjectPoints.push_back(cv::Point3f(156, 26, 104));
	cubeObjectPoints.push_back(cv::Point3f(156, 128, 104));
	cubeObjectPoints.push_back(cv::Point3f(52, 128, 104));

	// Creation des coins de la mire
	for(int x = 0; x < COLCHESSBOARD; x++)
		for(int y = 0; y < ROWCHESSBOARD; y++)
			chessCorners3D.push_back(cv::Point3f(x * 26.0f, y * 26.0f, 0.0f));	

	cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ);

	fs["cameraMatrix"] >> cameraMatrix;
	fs["distCoeffs"] >> distCoeffs;

	fs.release();

	cv::VideoCapture vcap(0); 
	if(!vcap.isOpened())
	{
		  std::cout << "FAIL!" << std::endl;
		  return -1;
	}

	char key = 0;

	do
	{
		std::cout << "recherche de mire" << std::endl;

		bool detectionMire = detecterMire(vcap, &chessCornersInit[1], &imCalibNext);

		std::cout << "mire detectee" << std::endl << std::endl;

		if(!detectionMire)
			break;

		do
		{
			vcap >> imCalibColor;

			cv::Mat rotVec = trackingMire(&imCalibColor, &imCalibNext, &chessCornersInit, &chessCorners3D, &cameraMatrix, &distCoeffs, &tvecs);
			
			dessinerCube(&imCalibColor, cubeObjectPoints, rotVec, tvecs, cameraMatrix, distCoeffs);
			dessinerPoints(&imCalibColor, objectPoints, rotVec, tvecs, cameraMatrix, distCoeffs);

			cv::imshow("Projection", imCalibColor);

			key = (char)cv::waitKey(30);

		}while(key != 27 && key != 32);

		if(key == 32)
		{
			patternfound = false;
			
			imagePoints.clear();
			chessCornersInit[0].clear();
			chessCornersInit[1].clear();
			imCalibNext.release();
		}

	}while(key != 27);

	return 0;
}
Example No. 14
int main()
{
	time_t timer = 0;
	time_t start = clock();
	time_t startImage = 0;
	std::cout << "Debut projection\t" << std::endl;

	bool patternfound = false;
	bool reset = false;
	bool endVideo = false;
	bool resetAuto = false;
	int i = 0;
	int nbImages = 0;
	double moyFinale = 0;

	cv::TermCriteria termcrit(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03);
	cv::Size winSize(31, 31);
	
	cv::Mat cameraMatrix, distCoeffs;
	cv::Mat imCalib;
	cv::Mat imCalibColor;
	cv::Mat imCalibNext;
	cv::Mat rvecs, tvecs;
	
	std::vector<cv::Point2f> imagePoints;
	std::vector<cv::Point3f> objectPoints;
	std::vector<cv::Point3f> QRpointObject3D;
	std::vector<std::vector<cv::Point2f>> chessCornersInit(2);
	std::vector<std::vector<cv::Point2f>> QRpointinit(2);
	std::vector<cv::Point3f> QRpoint3D;
	std::vector<cv::Point3f> tabuseless;
	std::vector<cv::Point3f> chessCorners3D;
	std::vector<double> distances;
	std::vector<double> moyDistances;
	
	cv::FileStorage fs("../rsc/intrinsicMatrix.yml", cv::FileStorage::READ);

	fs["cameraMatrix"] >> cameraMatrix;
	fs["distCoeffs"] >> distCoeffs;

	fs.release();

	std::ofstream file;
	file.open ("../rsc/error.txt");

	cv::VideoCapture vcap(0);
	if(!vcap.isOpened())
	{
		  std::cout << "FAIL!" << std::endl;
		  return -1;
	}

	char key = 0;

	do
	{
		/*std::cout << "recherche de mire" << std::endl;

		bool detectionMire = detecterMire(vcap, &chessCornersInit[1], &imCalibNext);

		std::cout << "mire detectee" << std::endl << std::endl;*/

		bool detectionQR = detecterQR(vcap , &QRpointinit[1], &QRpoint3D, &tabuseless, &QRpointObject3D, &imCalibNext);

		if(!detectionQR)
			break;

		do
		{
			vcap >> imCalibColor;

			if(imCalibColor.empty()){
				endVideo = true;
				break;
			}

			cv::Mat rotVec = trackingMire(&imCalibColor, &imCalibNext, &QRpointinit, &QRpoint3D, &cameraMatrix, &distCoeffs, &tvecs);

			dessinerPyra(&imCalibColor, QRpointObject3D, rotVec, tvecs, cameraMatrix, distCoeffs);
			imagePoints = dessinerPoints(&imCalibColor, tabuseless, rotVec, tvecs, cameraMatrix, distCoeffs);

			// Compute the reprojection error
			double moy = 0;
			for(int j = 0; j < QRpointinit[1].size(); j++)
			{
				double d = sqrt(pow(QRpointinit[0][j].y - tabuseless[j].y, 2) + pow(QRpointinit[0][j].x - tabuseless[j].x, 2));
				distances.push_back(d);
				moy += d;
				/*std::cout << "distance point numero " << j << " : " << std::endl
					<< "    subpix : x = " << chessCornersInit[0][j].x << "    y = " << chessCornersInit[0][j].y << std::endl
					<< "    projec : x = " << imagePoints[j].x << "    y = " << imagePoints[j].y << std::endl
					<< " distance : " << d << std::endl << std::endl;*/
			}

			moyDistances.push_back(moy / QRpointinit[1].size());
			////std::cout << std::endl << std::endl << "moyenne ecart points image " << i << " : " << moyDistances[i] << std::endl << std::endl;
			//file << "moyenne ecart points image " << i << " : " << moyDistances[i] << " px" << std::endl;

			if(moyDistances[i] > 10){ // if the reprojection error is too large, reset
				resetAuto = true;
				std::cout << "RESET" << std::endl;
				break;
			}

			//moyFinale += moyDistances[i];
			i++;
			nbImages++;

			cv::imshow("Projection", imCalibColor);

			key = (char)cv::waitKey(67);

		}while(key != 27 && key != 32 && resetAuto != true);

		if(key == 32 || resetAuto == true)
		{
			patternfound = false;
			resetAuto = false;
			i = 0;
			
			imagePoints.clear();
			chessCornersInit[0].clear();
			chessCornersInit[1].clear();
			QRpointinit[0].clear();
			QRpointinit[1].clear();
			QRpoint3D.clear();
			QRpointObject3D.clear();
			tabuseless.clear();
			moyDistances.clear();
			distances.clear();
			imCalibNext.release();
		}

	}while(key != 27 && endVideo != true);
		
	return 0;
}
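
The reprojection-error loop recurs in Examples 5, 9, 12, and 14; factored out (a sketch), it is simply the mean Euclidean distance between corresponding point sets:

double meanReprojError(const std::vector<cv::Point2f>& detected,
                       const std::vector<cv::Point2f>& projected)
{
	CV_Assert(detected.size() == projected.size() && !detected.empty());
	double sum = 0;
	for (size_t j = 0; j < detected.size(); j++)
	{
		double dx = detected[j].x - projected[j].x;
		double dy = detected[j].y - projected[j].y;
		sum += std::sqrt(dx * dx + dy * dy);
	}
	return sum / detected.size();
}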