Example #1
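// Note: this excerpt assumes the usual includes and project headers, e.g.
// <iostream>, <opencv2/opencv.hpp>, <osg/Group>, <osgViewer/Viewer>, and the
// project's own VideoRenderer / MarkerDetector headers, as well as the globals
// used below (width, height, detectedMarkers, the text nodes, the g_* strings,
// and createScene()) being defined elsewhere in the application.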
int main(int argc, const char * argv[])
{

    // Create the root node and add a scene
    osg::Group* root = new osg::Group();  
    
    ///////////////////////////////////////////////////////////////////////////////////
    // Step 1:
    // Open a video capture device
    cv::VideoCapture* capture = new cv::VideoCapture(0);
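    // The index 0 selects the default camera; a different device index or a
    // video file name could be passed instead.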

	
    // Check whether the video camera is available
    if (!capture->isOpened()) {
        std::cout << "Error: could not open the video capture device." << std::endl;
        return -1;
    }
 
    capture->set(CV_CAP_PROP_FRAME_WIDTH, (int)width);
    capture->set(CV_CAP_PROP_FRAME_HEIGHT, (int)height);
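    // The requested resolution is only a hint; the driver may ignore it, which
    // is why the actual frame size is read back further below.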
    
    // Init the video by fetching the first frame
    cv::Mat videoImage;
    cv::Mat videoImageRGB;
    (*capture) >> videoImage;
    cvtColor(videoImage, videoImageRGB, CV_RGBA2RGB);
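    // Note: CV_RGBA2RGB assumes the capture backend delivers 4-channel RGBA
    // frames; with a backend that returns the usual 3-channel BGR frames,
    // CV_BGR2RGB would be the conversion to use here.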
    
    // Read back the actual frame width and height delivered by the camera.
    width = capture->get(CV_CAP_PROP_FRAME_WIDTH);
    height = capture->get(CV_CAP_PROP_FRAME_HEIGHT);

    // Store the frame size of the video image.
    cv::Size size((int)width, (int)height);

    ///////////////////////////////////////////////////////////////////////////////////
    // Step 2:
    // Create the video background
    osg::Group* videoCanvas = VideoRenderer::createSzene(videoImageRGB.ptr<uchar>(0), size.width, size.height);
    root->addChild(videoCanvas);
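    // The canvas is assumed to texture directly from videoImageRGB's pixel
    // buffer, so that Mat must keep its size and type while the viewer runs.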

  
	///////////////////////////////////////////////////////////////////////////////////
    // Load the camera calibration parameters
    // output Matrices
    cv::Mat intrinsicMatrix = Mat::zeros(3, 3, CV_32F);
    cv::Mat distCoeffs = Mat::zeros(1, 4, CV_32F);
    
    FileStorage fs("../data_art/Camera_Parameter_File.yml", FileStorage::READ);
    fs[ "intrinsic_matrix"] >> intrincsicMatrix;
    fs[ "dist_coeffs"] >> distCoeffs;
    fs.release();
    cout << intrinsicMatrix << endl;
    cout << distCoeffs << endl;
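    // The YAML file is expected to provide the keys "intrinsic_matrix" (3x3)
    // and "dist_coeffs" (1x4), e.g. as written by an OpenCV calibration run.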
    

    ///////////////////////////////////////////////////////////////////////////////////
    // Step 3:
    // Init the Marker Tracker
    MarkerDetector* md = new MarkerDetector(width, height, intrinsicMatrix, distCoeffs);
    md->setThreshold(150);
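    // The threshold (0-255) presumably controls the detector's image
    // binarization; tune it for the lighting conditions.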
    

    ///////////////////////////////////////////////////////////////////////////////////
    // Step 4:
    // Create Scene
    root->addChild(createScene());
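    // createScene() is assumed to build the virtual content that is overlaid
    // on the video background.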
    
    
    ///////////////////////////////////////////////////////////////////////////////////
    // Step 5:
    // Compute the camera's field of view from the calibration and apply it to the projection matrix.
    Size imageSize;
    imageSize.width = width;
    imageSize.height = height;

    // Physical size of the camera sensor in millimetres.
    double apertureWidth = 3.2;
    double apertureHeight = 2.4;
    double fovx, fovy, focalLength, aspectRatio;
    Point2d principalPoint;
    cv::calibrationMatrixValues(intrinsicMatrix, imageSize, apertureWidth, apertureHeight, fovx, fovy, focalLength, principalPoint, aspectRatio);
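    // calibrationMatrixValues derives the fields of view (in degrees), the
    // focal length and principal point (in mm, given the sensor size above),
    // and the pixel aspect ratio from the intrinsic matrix and image size.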
    
    osg::Matrixd projectionmatrix;
    projectionmatrix.makePerspective(fovy, fovx/fovy, 0.1, 1000.0);
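    // makePerspective expects the vertical field of view in degrees, an aspect
    // ratio, and the near/far clip distances; fovx/fovy is used here as an
    // approximation of the image aspect ratio.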
    
    
    ///////////////////////////////////////////////////////////////////////////////////
    // Step 6:
    // Create a viewer and add a manipulator
    osgViewer::Viewer* viewer = new osgViewer::Viewer();
	viewer->setSceneData( root );
	viewer->setUpViewOnSingleScreen(0);
    viewer->getCamera()->setClearColor(osg::Vec4(0.5, 0.5, 1.0, 1.0)) ;
    viewer->getCamera()->setProjectionMatrix(projectionmatrix);
    //viewer->setCameraManipulator(new osgGA::TrackballManipulator);
    viewer->getCamera()->setViewMatrixAsLookAt(osg::Vec3d(0.0,-10.0, 0.0), osg::Vec3d(0.0,0.0, 0.0), osg::Vec3d(0.0,0.0, 1.0));
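    // With the trackball manipulator disabled, the view stays fixed at this
    // pose: 10 units along -Y, looking at the origin, with +Z as up.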
    //viewer->addEventHandler(new KeyboardEventHandler(teapot));
    viewer->init();
    
    
    ///////////////////////////////////////////////////////////////////////////////////
    // Step 7:
    // Run the viewer

    // Main loop: fetch a frame, update the marker tracking, render, and refresh the status text.
    while(!viewer->done())
    {
        // Fetch an image
        (*capture) >> videoImage;
        cvtColor(videoImage, videoImageRGB, CV_RGBA2RGB);
        
		 // Update the marker tracking
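        // detectedMarkers is assumed to be a marker container declared
        // elsewhere; findMarkers fills it with the markers found in this frame.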
        md->findMarkers(videoImageRGB, detectedMarkers, false);

        // Run the viewer
        viewer->frame();
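        // frame() advances the viewer by one iteration: event handling,
        // update, and rendering traversals.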
        

        textStr = "Found object "  + g_modelname;
        text->setText( textStr);
        
        textStr2 = g_paddlelocation;
        text2->setText( textStr2);

        textStr3 = g_state;
        text3->setText( textStr3);
        
        textStr4 = g_direction;
        text4->setText( textStr4);
        
        textStr5 = g_speed;
        text5->setText( textStr5);
        
        textStr6 = g_direction2;
        text6->setText( textStr6);
    }
    
	capture->release();
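    // The heap-allocated capture, detector, and viewer are not freed here; a
    // longer-running application should delete them or use smart pointers.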
    
    return 0;
}