bool CCapturador::LoadCapturesFromFilesUndisorted(string ruta,Mat& CameraMatrix,Mat& DistMatrix)
{
	m_vCaptures.clear();
	m_nPatterns = m_Options->m_nNumPatterns;
	for (int i = 0; i < m_nPatterns + m_Options->m_nNumFringes * 2; i++)
	{
		std::ostringstream oss;
		oss << ruta;
		if (i < 10)
			oss << "0";
		oss << i << ".jpg";
		string temp = oss.str();
		Mat capture = imread(oss.str(), 1);
		if (capture.empty())
			return false;
		Mat gray;
		cv::cvtColor(capture, gray, CV_BGR2GRAY);
		Mat view, rview, map1, map2;
		initUndistortRectifyMap(CameraMatrix, DistMatrix, Mat(),
			getOptimalNewCameraMatrix(CameraMatrix, DistMatrix, gray.size(), 1, gray.size(), 0),
			gray.size(), CV_16SC2, map1, map2);
		remap(gray, rview, map1, map2, INTER_LINEAR);
		m_vCaptures.push_back(rview);
	}
	return true;
}
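Note: the loop above rebuilds the undistortion maps for every image even though the camera matrix, distortion coefficients, and image size never change. A minimal sketch (not from the original class; names are illustrative) that computes the maps once and reuses them:

#include <opencv2/opencv.hpp>
#include <string>
#include <vector>

// Illustrative helper: undistort a batch of grayscale captures while
// computing the rectification maps only once.
std::vector<cv::Mat> undistortAll(const std::vector<std::string>& files,
                                  const cv::Mat& cameraMatrix,
                                  const cv::Mat& distCoeffs,
                                  cv::Size imageSize)
{
    cv::Mat map1, map2;
    cv::initUndistortRectifyMap(
        cameraMatrix, distCoeffs, cv::Mat(),
        cv::getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
        imageSize, CV_16SC2, map1, map2);

    std::vector<cv::Mat> out;
    for (const std::string& f : files)
    {
        cv::Mat gray = cv::imread(f, cv::IMREAD_GRAYSCALE);
        if (gray.empty() || gray.size() != imageSize)
            continue;                          // skip unreadable or mismatched images
        cv::Mat rview;
        cv::remap(gray, rview, map1, map2, cv::INTER_LINEAR);
        out.push_back(rview);
    }
    return out;
}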
Example n. 2
Renderer::Renderer(const char *rootDirectory) {
    char file[200];
    // create color program
    string src;
    sprintf(file, "%s/shader/color.vertexshader", rootDirectory);
    loadShaderCodeFromFile(file, src);
    compileShader(src, GL_VERTEX_SHADER, shader["color_vertex"]);

    sprintf(file, "%s/shader/color.fragmentshader", rootDirectory);
    loadShaderCodeFromFile(file, src);
    compileShader(src, GL_FRAGMENT_SHADER, shader["color_fragment"]);

    if (createRenderProgram(shader["color_vertex"], shader["color_fragment"], program["color"]) == GL_FALSE)
        return;

    MatrixID = glGetUniformLocation(program["color"], "MVP");
    ViewMatrixID = glGetUniformLocation(program["color"], "ViewMatrix");
    ModelMatrixID= glGetUniformLocation(program["color"], "ModelMatrix");
    LightPositionID = glGetUniformLocation(program["color"], "LightPosition_worldspace");

    Mat cameraMatrix, distCoeffs;
    sprintf(file, "%s/intrinsics.xml", rootDirectory);
    cv::FileStorage fs(file, cv::FileStorage::READ);
    fs["camera_matrix"] >> cameraMatrix;
    fs["distortion_coefficients"] >> distCoeffs;
    fs.release();

    // calculate undistortion mapping
    Mat img_rectified, map1, map2;
    initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
                            getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, cv::Size(WIDTH, HEIGHT), 1,
                                                      cv::Size(WIDTH, HEIGHT), 0),
                            cv::Size(WIDTH, HEIGHT), CV_16SC2, map1, map2);

    ViewMatrix = Matrix4f::Identity();
    ViewMatrix.topRightCorner(3,1) << 0,0,-2;

    float n = 0.01; // near field
    float f = 100; // far field
    ProjectionMatrix << cameraMatrix.at<double>(0, 0) / cameraMatrix.at<double>(0, 2), 0.0, 0.0, 0.0,
            0.0, cameraMatrix.at<double>(1, 1) / cameraMatrix.at<double>(1, 2), 0.0, 0.0,
            0.0, 0.0, -(f + n) / (f - n), (-2.0f * f * n) / (f - n),
            0.0, 0.0, -1.0, 0.0;
    K << cameraMatrix.at<double>(0, 0), cameraMatrix.at<double>(0, 1), cameraMatrix.at<double>(0, 2),
            cameraMatrix.at<double>(1, 0), cameraMatrix.at<double>(1, 1), cameraMatrix.at<double>(1, 2),
            cameraMatrix.at<double>(2, 0), cameraMatrix.at<double>(2, 1), cameraMatrix.at<double>(2, 2);
    cout << "K\n" << K << endl;
    Kinv = K.inverse();

    // background color
    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    // Enable depth test
    glEnable(GL_DEPTH_TEST);
    // Accept fragment if it is closer to the camera than the former one
    glDepthFunc(GL_LESS);
    // Cull triangles whose normal is not towards the camera
    glEnable(GL_CULL_FACE);
}
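The projection matrix above maps the pinhole intrinsics to OpenGL clip space under the implicit assumption that the principal point sits at the image center (fx / cx equals 2*fx / WIDTH only when cx = WIDTH / 2). A sketch of the more general construction that keeps the principal point explicit (Eigen types as in the constructor; the sign of the (1,2) term depends on the chosen image y-axis convention, so treat this as an assumption rather than the renderer's own convention):

#include <Eigen/Dense>

// Sketch: OpenGL-style projection from pinhole intrinsics fx, fy, cx, cy.
// For cx = w/2 and cy = h/2 this reduces to the fx/cx, fy/cy form used above.
Eigen::Matrix4f projectionFromIntrinsics(float fx, float fy, float cx, float cy,
                                         float w, float h, float n, float f)
{
    Eigen::Matrix4f P = Eigen::Matrix4f::Zero();
    P(0, 0) = 2.0f * fx / w;
    P(1, 1) = 2.0f * fy / h;
    P(0, 2) = 1.0f - 2.0f * cx / w;    // off-center principal point shift (x)
    P(1, 2) = 2.0f * cy / h - 1.0f;    // off-center principal point shift (y), convention-dependent
    P(2, 2) = -(f + n) / (f - n);
    P(2, 3) = -2.0f * f * n / (f - n);
    P(3, 2) = -1.0f;
    return P;
}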
Example n. 3
VisionNode::VisionNode() {
    cv::FileStorage fs("/home/roboy/workspace/mocap/src/intrinsics.xml", cv::FileStorage::READ);
    if (!fs.isOpened()) {
        ROS_ERROR("could not open intrinsics.xml");
        return;
    }
    fs["camera_matrix"] >> cameraMatrix;
    fs["distortion_coefficients"] >> distCoeffs;
    fs.release();

    ID = 0;

    // calculate undistortion mapping
    initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
                            getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, cv::Size(WIDTH, HEIGHT), 1,
                                                      cv::Size(WIDTH, HEIGHT), 0),
                            cv::Size(WIDTH, HEIGHT), CV_16SC2, map1, map2);

    marker_position_pub = new ros::Publisher;
    *marker_position_pub = nh.advertise<communication::MarkerPosition>("/mocap/marker_position", 100);

    video_pub = new ros::Publisher;
    *video_pub = nh.advertise<sensor_msgs::Image>("/mocap/video", 1);

    camera_control_sub = nh.subscribe("/mocap/camera_control", 100, &VisionNode::camera_control, this);

    cameraID_pub = new ros::Publisher;
    *cameraID_pub = nh.advertise<std_msgs::Int32>("/mocap/cameraID", 100);

    // Publish the marker
    while (cameraID_pub->getNumSubscribers() < 1) {
        ros::Duration d(1.0);
        if (!ros::ok()) {
            return;
        }
        ROS_WARN_ONCE("Waiting for mocap plugin to subscribe to /mocap/cameraID");
        d.sleep();
    }
    ROS_INFO_ONCE("Found subscriber");

    spinner = new ros::AsyncSpinner(1);
    spinner->start();

    std_msgs::Int32 msg;
    msg.data = ID;
    cameraID_pub->publish(msg);

    img = cv::Mat(HEIGHT, WIDTH, CV_8UC4, img_data);
    img_rectified = cv::Mat(HEIGHT, WIDTH, CV_8UC4, img_rectified_data);

    t1 = std::chrono::high_resolution_clock::now();

    StartCamera(WIDTH, HEIGHT, 90, CameraCallback);
}
int main(int argc, char const ** argv)
{
  char const * keys =
    "{    h | help   | false            | show help message}"
    "{    f | file   | calibration.yaml | calibration yaml file}"
    "{    c | camera | 0                | camera number}"
    ;

  cv::CommandLineParser parser(argc, argv, keys);

  if (parser.get<bool>("help"))
  {
    parser.printParams();
    return 0;
  }
  
  int capnum = parser.get<int>("camera");
  cv::VideoCapture cap(capnum);

  CalibrationData calibData;
  calibData.load(parser.get<std::string>("file"));

  cv::Mat map1, map2;
  cv::initUndistortRectifyMap(calibData.cameraMatrix, calibData.distCoeffs, cv::Mat(), 
      getOptimalNewCameraMatrix(calibData.cameraMatrix, calibData.distCoeffs, calibData.imageSize, 1, calibData.imageSize, 0),
      calibData.imageSize, CV_16SC2, map1, map2);

  cv::namedWindow("undistorted");
  while(true)
  {
    cv::Mat frame, undistFrame;
    cap >> frame;

    if(frame.size() != calibData.imageSize)
    {
      std::cerr << "Frame size does not match calibration image size" << std::endl;
      exit(-1);
    }

    cv::remap(frame, undistFrame, map1, map2, cv::INTER_LINEAR);

    cv::imshow("image", frame);
    cv::imshow("undistorted", undistFrame);

    cv::waitKey(10);
  }

  return 0;
}
///////////////////////////////////////////////////////
// Panel::LoadCalibration() 
//  Description: Imports a camera calibration previously
//  created by CalibrateCameraNoOutput.
///////////////////////////////////////////////////////
void Panel::LoadCalibration(string sFilePath)
{
	cout << "Loading Calibration" << endl;

	//! [file_read]
	
	Mat import_distortion_coefficients;
	Mat import_camera_matrix;
	Mat import_image_points;
	Size import_image_size;

	const string inputSettingsFile = sFilePath;
	FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings
	if (!fs.isOpened())
	{
		cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
		//		return -1;
	}
	fs["distortion_coefficients"] >> import_distortion_coefficients;
	fs["camera_matrix"] >> import_camera_matrix;
	fs["image_width"] >> import_image_size.width;
	fs["image_height"] >> import_image_size.height;
	fs["image_points"] >> import_image_points;

	fs.release();                                         // close Settings file
	//! [file_read]

	Mat view, rview, map1, map2;

	initUndistortRectifyMap(import_camera_matrix, import_distortion_coefficients, Mat(),
		getOptimalNewCameraMatrix(import_camera_matrix, import_distortion_coefficients, import_image_size, 1, import_image_size, 0),
		import_image_size, CV_16SC2, map1, map2);

	m_mainMap1 = map1;
	m_mainMap2 = map2;

	m_mainCameraMatrix = import_camera_matrix;
	m_mainDistCoeffs = import_distortion_coefficients;

	cout << "Calibration Loaded" << endl << endl;

}
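LoadCalibration() only caches the maps; a minimal usage sketch showing how the stored m_mainMap1 / m_mainMap2 could be applied to an incoming frame (the helper below is hypothetical, not part of the original Panel class):

#include <opencv2/opencv.hpp>

// Hypothetical helper: applies the maps cached by Panel::LoadCalibration().
// Assumes the maps were built for the frame's resolution.
cv::Mat undistortWithCachedMaps(const cv::Mat& frame,
                                const cv::Mat& map1, const cv::Mat& map2)
{
    cv::Mat rectified;
    cv::remap(frame, rectified, map1, map2, cv::INTER_LINEAR);
    return rectified;
}
// e.g.  Mat rview = undistortWithCachedMaps(view, m_mainMap1, m_mainMap2);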
Example n. 6
void CameraCalibration::calibrate()
{
    const string inputSettingsFile = "default.xml";
    // Read the settings
    FileStorage fs( inputSettingsFile, FileStorage::READ );
    if ( !fs.isOpened() ) {
        FileStorage fs( inputSettingsFile, FileStorage::WRITE );
        fs.release();
        cerr << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
        return;
    }
    else {
        s.read( fs["Settings"] );
        // close Settings file
        fs.release();
    }

    if ( !s.goodInput ) {
        cerr << "Invalid input detected. Application stopping." << endl;
        return;
    }


    vector<vector<Point2f> > imagePoints;
    Mat distCoeffs;
    Size imageSize;
    int mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION;
    clock_t prevTimestamp = 0;
    const Scalar RED( 0, 0, 255 ), GREEN( 0, 255, 0 );
    const char ESC_KEY = 27;

    for ( int i = 0; ; ++i ) {
        Mat view;
        bool blinkOutput = false;

        view = s.nextImage();

        //-----  If no more image, or got enough, then stop calibration and show result -------------
        if ( mode == CAPTURING && imagePoints.size() >= (unsigned)s.nrFrames ) {
            if ( runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints ) ) {
                mode = CALIBRATED;
            }
            else {
                mode = DETECTION;
            }
        }
        // If no more images then run calibration, save and stop loop.
        if ( view.empty() ) {
            if ( imagePoints.size() > 0 ) {
                runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints);
            }
            break;
        }

        imageSize = view.size();  // Format input image.
        if ( s.flipVertical ) {
            flip( view, view, 0 );
        }

        vector<Point2f> pointBuf;

        bool found;
        // Find feature points on the input format
        switch ( s.calibrationPattern ) {
        case Settings::CHESSBOARD:
            found = findChessboardCorners( view, s.boardSize, pointBuf,
                CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK | CALIB_CB_NORMALIZE_IMAGE);
            break;
        case Settings::CIRCLES_GRID:
            found = findCirclesGrid( view, s.boardSize, pointBuf );
            break;
        case Settings::ASYMMETRIC_CIRCLES_GRID:
            found = findCirclesGrid( view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID );
            break;
        default:
            found = false;
            break;
        }

        // If done with success, improve the found corners' coordinate accuracy for chessboard
        if ( found ) {
                if ( s.calibrationPattern == Settings::CHESSBOARD ) {
                    Mat viewGray;
                    cvtColor( view, viewGray, COLOR_BGR2GRAY );
                    cornerSubPix( viewGray, pointBuf, Size( 11,11 ), Size(-1,-1), TermCriteria( TermCriteria::EPS + TermCriteria::MAX_ITER, 30, 0.1 ) );
                }

                // For camera only take new samples after delay time
                if ( mode == CAPTURING && (!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC) ) {
                    imagePoints.push_back( pointBuf );
                    prevTimestamp = clock();
                    blinkOutput = s.inputCapture.isOpened();
                }

                // Draw the corners.
                drawChessboardCorners( view, s.boardSize, Mat( pointBuf ), found );
        }

        //----------------------------- Output Text ------------------------------------------------
        string msg = ( mode == CAPTURING ) ? "100/100" :
                      mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
        int baseLine = 0;
        Size textSize = getTextSize( msg, 1, 1, 1, &baseLine );
        Point textOrigin( view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10 );

        if ( mode == CAPTURING ) {
            if ( s.showUndistorsed ) {
                msg = format( "%d/%d Undist", (int)imagePoints.size(), s.nrFrames );
            }
            else {
                msg = format( "%d/%d", (int)imagePoints.size(), s.nrFrames );
            }
        }

        putText( view, msg, textOrigin, 1, 1, mode == CALIBRATED ?  GREEN : RED );

        if ( blinkOutput ) {
            bitwise_not( view, view );
        }

        //------------------------- Video capture  output  undistorted ------------------------------
        if ( mode == CALIBRATED && s.showUndistorsed ) {
            Mat temp = view.clone();
            undistort( temp, view, cameraMatrix, distCoeffs );
        }

        //------------------------------ Show image and check for input commands -------------------
        imshow( "Image View", view );
        char key = (char)waitKey( s.inputCapture.isOpened() ? 50 : s.delay );

        if ( key  == ESC_KEY ) {
            break;
        }

        if ( key == 'u' && mode == CALIBRATED ) {
            s.showUndistorsed = !s.showUndistorsed;
        }

        if ( s.inputCapture.isOpened() && key == 'g' ) {
            mode = CAPTURING;
            imagePoints.clear();
        }
    }

    // -----------------------Show the undistorted image for the image list ------------------------
    if ( s.inputType == Settings::IMAGE_LIST && s.showUndistorsed ) {
        Mat view, rview, map1, map2;
        initUndistortRectifyMap( cameraMatrix, distCoeffs, Mat(),
            getOptimalNewCameraMatrix( cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0 ),
            imageSize, CV_16SC2, map1, map2 );

        for ( int i = 0; i < (int)s.imageList.size(); i++ ) {
            view = imread( s.imageList[i], 1 );
            if ( view.empty() ) {
                continue;
            }
            remap( view, rview, map1, map2, INTER_LINEAR );
            imshow( "Image View", rview );
            char c = (char)waitKey();
            if ( c  == ESC_KEY || c == 'q' || c == 'Q' ) {
                break;
            }
        }
    }
}
Example n. 7
int main_camera(Parameter *pParam)
{

	Size boardSize(8,6);
	Size imageSize;
	int   flags = CV_CALIB_FIX_ASPECT_RATIO;
	float squareSize = pParam->square_size;
	float aspectRatio = 1.f;
	Mat cameraMatrix;
	Mat distCoeffs;
	Mat frame;
	VideoCapture video;

	int flag_finish = 0;

	int result = 0;

	// read frames from data file
	vector<vector<Point2f> > imagePointsSet;
	vector<Point2f> imagePoints;
	vector<string>  fileNames;

	fileNames.clear();
	imagePointsSet.clear();

	video.open(pParam->camera_index);

	if(video.isOpened() != true)
	{
		printf("fail to open camera %d\n", pParam->camera_index);

		video.open(-1);

		if(video.isOpened() != true)
		{
			printf("fail to open the default camera, please make sure an accessible camera is connected \n");
			return -1;
		}
		else
		{
			printf("open the default camera\n");
		}
	}

	while(flag_finish == 0)
	{
		Mat framecv;
		int found = 0;

		video >> frame;

		cvtColor(frame, framecv, CV_RGB2GRAY);

		imshow("framecv", framecv); // for oberserving input
		waitKey(10);

		if(framecv.cols <= 0 || framecv.rows <= 0 || framecv.data == NULL )
		{
			printf("finish chess board detection \n");
			break;
		}

		imagePoints.clear();
		imageSize.width = framecv.cols;
		imageSize.height = framecv.rows;

		found = findChessboardCorners(
						framecv,
						boardSize,
						imagePoints,
						CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);

		if(found)
		{
			cornerSubPix(
					framecv,
					imagePoints,
					Size(11,11),
					Size(-1,-1),
					TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

			if(found)
			{
				char key = 0;
				drawChessboardCorners( framecv, boardSize, Mat(imagePoints), found);
				imshow("framecv_xx", framecv);
				key = waitKey(0);
				if(key == 'c' || key == 'C') // detected corners not correct; skip this frame
					continue;
				else if(key == 'q' || key == 'Q')
					return 0;
				else if(key == 's' || key == 'S')
					flag_finish = 1;
			}

			printf("get a new chess board input\n");
			imagePointsSet.push_back(imagePoints);

		}
		else
		{
			printf("no found usable chess board\n");
		}

	}


	// calibrate cameras
	if(1)
    {
	    vector<Mat> rvecs, tvecs;
	    vector<float> reprojErrs;
	    double totalAvgErr = 0;

	    result = runCalibration(imagePointsSet, imageSize, boardSize, CHESSBOARD, squareSize,
                   aspectRatio, flags, cameraMatrix, distCoeffs,
                   rvecs, tvecs, reprojErrs, totalAvgErr);
    }

	// test calibrate
	if(1)
	{
        Mat view, rview, map1, map2;
        int i;
        Size imageSize2;

        imageSize2.width  = 2 * imageSize.width;
        imageSize2.height = 2 * imageSize.height;

        initUndistortRectifyMap(cameraMatrix,
        						distCoeffs,
								Mat(),
                                getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
                                imageSize, CV_16SC2, map1, map2);


        while(1)
        {
        	char key = 0;
        	video>>view;
        	remap(view, rview, map1, map2, INTER_LINEAR);

        	imshow("rview", rview);
        	key = waitKey(0);
        	if(key == 's')
        		break;
        	else if(key == 'q')
        		break;
        }
	}

	return result;
}
Example n. 8
int main_file(Parameter *pParam)
{
	char *file_list = pParam->image_list;//"/home/sean/Pictures/calib_test1/dir.txt";

	Size boardSize(8,6);
	Size imageSize;
	int   flags = CV_CALIB_FIX_ASPECT_RATIO;
	float squareSize = pParam->square_size;
	float aspectRatio = 1.f;
	Mat cameraMatrix;
	Mat distCoeffs;
	int result = 0;

	// read frames from data file
	vector<vector<Point2f> > imagePointsSet;
	vector<Point2f> imagePoints;
	vector<string>  fileNames;

	fileNames.clear();
	imagePointsSet.clear();

	getFileName(file_list, fileNames);

	for(unsigned int i = 0; i < fileNames.size(); i++)
	{
		Mat framecv;
		int found = 0;

		framecv = imread(fileNames[i].c_str(), 0);

		if(framecv.cols <= 0 || framecv.rows <= 0 || framecv.data == NULL )
		{
			printf("finish chess board detection \n");
			break;
		}

		imagePoints.clear();
		imageSize.width = framecv.cols;
		imageSize.height = framecv.rows;

		found = findChessboardCorners(
						framecv,
						boardSize,
						imagePoints,
						CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);

		if(found)
		{
			cornerSubPix(
					framecv,
					imagePoints,
					Size(11,11),
					Size(-1,-1),
					TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

			if(found)
			{
				drawChessboardCorners( framecv, boardSize, Mat(imagePoints), found);
				imshow("framecv_xx", framecv);
				waitKey(10);
			}

			imagePointsSet.push_back(imagePoints);

		}

	}


	// calibrate cameras
	if(1)
    {
	    vector<Mat> rvecs, tvecs;
	    vector<float> reprojErrs;
	    double totalAvgErr = 0;

	    result = runCalibration(imagePointsSet, imageSize, boardSize, CHESSBOARD, squareSize,
                   aspectRatio, flags, cameraMatrix, distCoeffs,
                   rvecs, tvecs, reprojErrs, totalAvgErr);
    }

	// test calibrate
	if(1)
	{
        Mat view, rview, map1, map2;
        int i;
        Size imageSize2;

        imageSize2.width  = 2 * imageSize.width;
        imageSize2.height = 2 * imageSize.height;

        initUndistortRectifyMap(cameraMatrix,
        						distCoeffs,
								Mat(),
                                getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
                                imageSize, CV_16SC2, map1, map2);


        for(i = 0; i < fileNames.size(); i++)
        {
        	view = imread(fileNames[i].c_str());
        	remap(view, rview, map1, map2, INTER_LINEAR);

        	imshow("rview", rview);
        	waitKey(0);
        }
	}

	// save
	if(result == 0 )
	{
		save_result(pParam->output_path, cameraMatrix, distCoeffs);
	}

	return result;
}
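save_result() is called above but not shown in this listing; a minimal sketch of what such a function could look like with cv::FileStorage, writing the same keys the other examples read back (the signature and return type are assumptions):

#include <cstdio>
#include <opencv2/opencv.hpp>

// Assumed sketch of save_result(): writes the intrinsics in the same
// XML/YAML layout the other examples load ("camera_matrix", "distortion_coefficients").
void save_result(const char* output_path,
                 const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs)
{
    cv::FileStorage fs(output_path, cv::FileStorage::WRITE);
    if (!fs.isOpened())
    {
        printf("failed to open %s for writing\n", output_path);
        return;
    }
    fs << "camera_matrix" << cameraMatrix;
    fs << "distortion_coefficients" << distCoeffs;
    fs.release();
}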
Example n. 9
void CalibrateThread::run()
{
    Size boardSize, imageSize;
    float squareSize = 1.f, aspectRatio = 1.f;
    Mat cameraMatrix, distCoeffs;
    //QString of = ui->lineEdit_WorkFolder->text() + '/' + ui->lineEdit_OutputName->text();
    QByteArray ba = strFileName.toLatin1();
    const char* outputFilename = ba.data();

    int i, nframes = 0;
    bool writeExtrinsics = true, writePoints = true;
    bool undistortImage = false;
    int flags = 0;
    VideoCapture capture;
    bool flipVertical = false;
    bool showUndistorted = false;

    int delay = 1000;
    clock_t prevTimestamp = 0;
    int mode = CAPTURING;
    vector<vector<Point2f> > imagePoints;
    vector<string> imageList;
    Pattern pattern = CHESSBOARD;

    boardSize.width = m_width;
    boardSize.height = m_height;
    squareSize = m_squaresize;



    //ui->textEdit_Information->append("\nCalibrating... Please wait for a while\n");


    if( imgList.size() == 0  )
    {
        //QMessageBox::warning(NULL, "Error", "Please choose a right folder");
        emit popupErrorInformation("Please choose a right folder");
        emit closeImageWindow();
        return;
    }
    else
    {
        nframes = imgList.size();
    }

    emit appendText("\nCalibrating... Please wait for a while\n");
    //namedWindow( "Image View", 1 );
    //bDialog->show();

    for(i = 0; i < nframes ;i++)
    {
        //ui->textEdit_Information->append("Processing the image No. " + QString::number(i + 1));


        emit appendText("Processing the image No. " + QString::number(i + 1));
        Mat view, viewGray;
        bool blink = false;
        qDebug(imgList.at(i).toLatin1().data());
        if( i < (int)imgList.size() )
            view = imread(imgList.at(i).toLatin1().data(), 1);

        if(!view.data)
        {
            //QMessageBox::warning(NULL, "Error", );
            emit popupErrorInformation("Could not open image files");
            return;
        }

        imageSize = view.size();

        if( flipVertical )
            flip( view, view, 0 );

        vector<Point2f> pointbuf;
        cvtColor(view, viewGray, CV_BGR2GRAY);

        bool found;
        switch( pattern )
        {
        case CHESSBOARD:
            found = findChessboardCorners( view, boardSize, pointbuf,
                                           CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
            break;
        case CIRCLES_GRID:
            found = findCirclesGrid( view, boardSize, pointbuf );
            break;
        case ASYMMETRIC_CIRCLES_GRID:
            found = findCirclesGrid( view, boardSize, pointbuf, CALIB_CB_ASYMMETRIC_GRID );
            break;
        }

        // improve the found corners' coordinate accuracy
        if( pattern == CHESSBOARD && found) cornerSubPix( viewGray, pointbuf, Size(11,11),
                                                          Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

        if( mode == CAPTURING && found &&
                (!capture.isOpened() || clock() - prevTimestamp > delay*1e-3*CLOCKS_PER_SEC) )
        {
            imagePoints.push_back(pointbuf);
            prevTimestamp = clock();
            blink = capture.isOpened();
        }
        if(found)
            drawChessboardCorners( view, boardSize, Mat(pointbuf), found );

        string msg = mode == CAPTURING ? "100/100" :
                                         mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
        int baseLine = 0;
        Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
        Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);

        if( mode == CAPTURING )
        {
            if(undistortImage)
                msg = format( "%d/%d Undist", (int)imagePoints.size(), nframes );
            else
                msg = format( "%d/%d", (int)imagePoints.size(), nframes );
        }

        putText( view, msg, textOrigin, 1, 1,
                 mode != CALIBRATED ? Scalar(0,0,255) : Scalar(0,255,0));

        if( blink )
            bitwise_not(view, view);

        if( mode == CALIBRATED && undistortImage )
        {
            Mat temp = view.clone();
            undistort(temp, view, cameraMatrix, distCoeffs);
        }

        Mat rgb;
        cvtColor(view, rgb, CV_BGR2RGB);

        QImage  image32 = QImage(rgb.cols, rgb.rows, QImage::Format_RGB32);
        QRgb value;
        for(int r = 0; r < rgb.rows; r++)
        {
            for(int c = 0; c < rgb.cols; c++)
            {
                value = qRgb(rgb.ptr<uchar>(0)[r * rgb.cols * 3 + c * 3 + 0], rgb.ptr<uchar>(0)[r * rgb.cols * 3 + c * 3 + 1], rgb.ptr<uchar>(0)[r * rgb.cols * 3 + c * 3 + 2]);
                image32.setPixel(c, r, value);
            }
        }


        emit showBitmap(image32);

        int key;
        if(i < nframes - 1)
        {
            key = 0xff & waitKey(500);
        }
        else
        {
            key = waitKey(500);
        }

        if( (key & 255) == 27 )
            break;

        if( key == 'u' && mode == CALIBRATED )
            undistortImage = !undistortImage;




    }
    if(imagePoints.size() > 0)
    {
        emit appendText("\n" + QString::number(imagePoints.size()) + " out of " + QString::number(nframes) + " images are effective!\n" );
        runAndSave(outputFilename, imagePoints, imageSize,
                   boardSize, pattern, squareSize, aspectRatio,
                   flags, cameraMatrix, distCoeffs,
                   writeExtrinsics, writePoints);

    }
    else
    {
        emit appendText("Calibrating is not successful! \nPlease change the parameters and try again!");
        emit popupErrorInformation("Sorry, no enough points are detected! Please try another group of images!");
        emit closeImageWindow();
        return;

    }


    emit appendText("Calibrating Successfully! \nPlease go to the folder to check the out put files!");
    emit closeImageWindow();
    if( !capture.isOpened() && showUndistorted )
    {
        Mat view, rview, map1, map2;
        initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
                                getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
                                imageSize, CV_16SC2, map1, map2);

        for( i = 0; i < (int)imageList.size(); i++ )
        {
            view = imread(imageList[i], 1);
            if(!view.data)
                continue;
            //undistort( view, rview, cameraMatrix, distCoeffs, cameraMatrix );
            remap(view, rview, map1, map2, INTER_LINEAR);
            imshow("Image View", rview);
            int c = 0xff & waitKey();
            if( (c & 255) == 27 || c == 'q' || c == 'Q' )
                break;
        }
    }

    return;
}
	void Calibration::updateUndistortion() {
		Mat undistortedCameraMatrix = getOptimalNewCameraMatrix(distortedIntrinsics.getCameraMatrix(), distCoeffs, distortedIntrinsics.getImageSize(), fillFrame ? 0 : 1);
		initUndistortRectifyMap(distortedIntrinsics.getCameraMatrix(), distCoeffs, Mat(), undistortedCameraMatrix, distortedIntrinsics.getImageSize(), CV_16SC2, undistortMapX, undistortMapY);
		undistortedIntrinsics.setup(undistortedCameraMatrix, distortedIntrinsics.getImageSize());
	}
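The fillFrame ? 0 : 1 argument above is the alpha parameter of getOptimalNewCameraMatrix: 0 crops to the all-valid-pixel region, while 1 keeps every source pixel (black borders may appear). A small standalone sketch comparing the two settings (names are illustrative):

#include <opencv2/opencv.hpp>

// Illustrative comparison of the alpha parameter used above:
// alpha = 0 -> zoomed/cropped view, no invalid (black) pixels
// alpha = 1 -> full source frame retained, black borders may appear
void compareAlphaSettings(const cv::Mat& cameraMatrix, const cv::Mat& distCoeffs,
                          const cv::Mat& frame)
{
    cv::Mat cropped, full;
    cv::Mat K0 = cv::getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, frame.size(), 0.0);
    cv::Mat K1 = cv::getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, frame.size(), 1.0);
    cv::undistort(frame, cropped, cameraMatrix, distCoeffs, K0);
    cv::undistort(frame, full, cameraMatrix, distCoeffs, K1);
    cv::imshow("alpha = 0 (fill frame)", cropped);
    cv::imshow("alpha = 1 (keep all pixels)", full);
    cv::waitKey();
}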
Example n. 11
/// Calibrates the extrinsic parameters of the setup and saves them to an XML file
/// Press 'r' to retrieve chessboard corners
///      's' to save and exit
///      'c' to exit without saving
/// In: inputCapture1: video feed of camera 1
///     inputCapture2: video feed of camera 2
void CalibrateEnvironment(VideoCapture& inputCapture1, VideoCapture& inputCapture2)
{
    Size boardSize;
    boardSize.width = BOARD_WIDTH;
    boardSize.height = BOARD_HEIGHT;
    
    const string fileName1 = "CameraIntrinsics1.xml";
    const string fileName2 = "CameraIntrinsics2.xml";
    
    cerr << "Attempting to open configuration files" << endl;
    FileStorage fs1(fileName1, FileStorage::READ);
    FileStorage fs2(fileName2, FileStorage::READ);
    
    Mat cameraMatrix1, cameraMatrix2;
    Mat distCoeffs1, distCoeffs2;
    
    fs1["Camera_Matrix"] >> cameraMatrix1;
    fs1["Distortion_Coefficients"] >> distCoeffs1;
    fs2["Camera_Matrix"] >> cameraMatrix2;
    fs2["Distortion_Coefficients"] >> distCoeffs2;
    
    if (cameraMatrix1.data == NULL || distCoeffs1.data == NULL ||
        cameraMatrix2.data == NULL || distCoeffs2.data == NULL)
    {
        cerr << "Could not load camera intrinsics\n" << endl;
    }
    else{
        cerr << "Loaded intrinsics\n" << endl;
        cerr << "Camera Matrix1: " << cameraMatrix1 << endl;
        cerr << "Camera Matrix2: " << cameraMatrix2 << endl;
        
    }
    
    Mat translation;
    Mat image1, image2;
    Mat mapX1, mapX2, mapY1, mapY2;
    inputCapture1.read(image1);
    Size imageSize = image1.size();
    bool rotationCalibrated = false;
    
    while(inputCapture1.isOpened() && inputCapture2.isOpened())
    {
        inputCapture1.read(image1);
        inputCapture2.read(image2);
        
        if (rotationCalibrated)
        {
            Mat t1 = image1.clone();
            Mat t2 = image2.clone();
            remap(t1, image1, mapX1, mapY1, INTER_LINEAR);
            remap(t2, image2, mapX2, mapY2, INTER_LINEAR);
            t1.release();
            t2.release();
        }
        
        char c = waitKey(15);
        if (c == 'c')
        {
            cerr << "Cancelling..." << endl;
            return;
        }
        else if(c == 's' && rotationCalibrated)
        {
            cerr << "Saving..." << endl;
            const string fileName = "EnvironmentCalibration.xml";
            FileStorage fs(fileName, FileStorage::WRITE);
            fs << "Camera_Matrix_1" <<  getOptimalNewCameraMatrix(cameraMatrix1, distCoeffs1, imageSize, 1,imageSize, 0);
            fs << "Camera_Matrix_2" <<  getOptimalNewCameraMatrix(cameraMatrix2, distCoeffs2, imageSize, 1, imageSize, 0);
            fs << "Mapping_X_1" << mapX1;
            fs << "Mapping_Y_1" << mapY1;
            fs << "Mapping_X_2" << mapX2;
            fs << "Mapping_Y_2" << mapY2;
            fs << "Translation" << translation;
            cerr << "Exiting..." << endl;
            destroyAllWindows();
            return;
        }
        else if(c == 's' && !rotationCalibrated)
        {
            cerr << "Exiting..." << endl;
            destroyAllWindows();
            return;
        }
        else if (c == 'r')
        {
            BoardSettings s;
            s.boardSize.width = BOARD_WIDTH;
            s.boardSize.height = BOARD_HEIGHT;
            s.cornerNum = s.boardSize.width * s.boardSize.height;
            s.squareSize = (float)SQUARE_SIZE;
            
            vector<Point3f> objectPoints;
            vector<vector<Point2f> > imagePoints1, imagePoints2;
            
            if (RetrieveChessboardCorners(imagePoints1, imagePoints2, s, inputCapture1, inputCapture2, ITERATIONS))
            {
                vector<vector<Point3f> > objectPoints(1);
                CalcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0]);
                objectPoints.resize(imagePoints1.size(),objectPoints[0]);
                
                Mat R, T, E, F;
                Mat rmat1, rmat2, rvec;
                
                double rms = stereoCalibrate(objectPoints, imagePoints1, imagePoints2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E, F,
                                             TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01),
                                             CV_CALIB_FIX_INTRINSIC);
                
                cerr << "Original translation: " << T << endl;
                cerr << "Reprojection error reported by camera: " << rms << endl;
                
                // convert to rotation vector and then remove 90 degree offset
                Rodrigues(R, rvec);
                rvec.at<double>(1,0) -= 1.570796327;
                
                // equal rotation applied to each image...not necessarily needed
                rvec = rvec/2;
                Rodrigues(rvec, rmat1);
                invert(rmat1,rmat2);
                
                initUndistortRectifyMap(cameraMatrix1, distCoeffs1, rmat1,
                                        getOptimalNewCameraMatrix(cameraMatrix1, distCoeffs1, imageSize, 1,imageSize, 0), imageSize, CV_32FC1, mapX1, mapY1);
                initUndistortRectifyMap(cameraMatrix2, distCoeffs2, rmat2,
                                        getOptimalNewCameraMatrix(cameraMatrix2, distCoeffs2, imageSize, 1, imageSize, 0), imageSize, CV_32FC1, mapX2, mapY2);
                
                
                // reproject points in camera 1 since its rotation has been changed
                // need to find the translation between cameras based on the new camera 1 orientation
                for  (int i = 0; i < imagePoints1.size(); i++)
                {
                    Mat pointsMat1 = Mat(imagePoints1[i]);
                    Mat pointsMat2 = Mat(imagePoints2[i]);
                    
                    
                    undistortPoints(pointsMat1, imagePoints1[i], cameraMatrix1, distCoeffs1, rmat1,getOptimalNewCameraMatrix(cameraMatrix1, distCoeffs1, imageSize, 1, imageSize, 0));
                    undistortPoints(pointsMat2, imagePoints2[i], cameraMatrix2, distCoeffs2, rmat2,getOptimalNewCameraMatrix(cameraMatrix2, distCoeffs2, imageSize, 1, imageSize, 0));
                    
                    pointsMat1.release();
                    pointsMat2.release();
                }
                
                Mat temp1, temp2;
                R.release();
                T.release();
                E.release();
                F.release();
                
                // TODO: remove this
                // CalcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0]);
                // objectPoints.resize(imagePoints1.size(),objectPoints[0]);
                
                stereoCalibrate(objectPoints, imagePoints1, imagePoints2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E, F,
                                TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01),
                                CV_CALIB_FIX_INTRINSIC);
                
                // need to alter translation matrix so
                // [0] = distance in X direction (right from perspective of camera 1 is positive)
                // [1] = distance in Y direction (away from camera 1 is positive)
                // [2] = distance in Z direction (up is positive)
                translation = T;
                double temp = -translation.at<double>(0,0);
                translation.at<double>(0,0) = translation.at<double>(2,0);
                translation.at<double>(2,0) = temp;
                
                cerr << "Translation reproj: " << translation << endl;
                Rodrigues(R, rvec);
                cerr << "Reprojected rvec: " << rvec << endl;
                
                imagePoints1.clear();
                imagePoints2.clear();
                
                rvec.release();
                rmat1.release();
                rmat2.release();
                R.release();
                T.release();
                E.release();
                F.release();
                
                rotationCalibrated = true;
            }
        }
        imshow("Image View1", image1);
        imshow("Image View2", image2);
    }
}
bool CCapturador::CapturePatternsUndisorted(Mat& CameraMatrix,Mat& DistMatrix,int time)
{
	m_vCaptures.clear();
	VideoCapture cap(0); // open the default camera
	if (!cap.isOpened())  // check if we succeeded
		return false;
	bool bMakeCapture = false;
	int nPatterns = 0;
	namedWindow("Camera", 1);
	namedWindow("Patrones");
/*
	HWND win_handle = FindWindow(0, L"Patrones");
	if (!win_handle)
	{
		printf("Failed FindWindow\n");
	}

	// Resize
	unsigned int flags = (SWP_SHOWWINDOW | SWP_NOSIZE | SWP_NOMOVE | SWP_NOZORDER);
	flags &= ~SWP_NOSIZE;
	unsigned int x = 0;
	unsigned int y = 0;
	unsigned int w = m_Options->m_nWidth;
	unsigned int h = m_Options->m_nHeight;
	SetWindowPos(win_handle, HWND_NOTOPMOST, x, y, w, h, flags);

	// Borderless
	SetWindowLong(win_handle, GWL_STYLE, GetWindowLong(win_handle, GWL_EXSTYLE) | WS_EX_TOPMOST);
	ShowWindow(win_handle, SW_SHOW);
	cvMoveWindow("Patrones", 0, 0);
    */
    long A = getTickCount();
    long B = getTickCount();
	bool start = false;
	for (int i = 0;;)
	{
		imshow("Patrones", m_vPatterns[i]);
		Mat frame;
		cap >> frame;
		if (frame.empty())
			return false;
		Mat view, rview, map1, map2;
		initUndistortRectifyMap(CameraMatrix, DistMatrix, Mat(),
			getOptimalNewCameraMatrix(CameraMatrix, DistMatrix, frame.size(), 1, frame.size(), 0),
			frame.size(), CV_16SC2, map1, map2);
		remap(frame, rview, map1, map2, INTER_LINEAR);
		imshow("Camera", rview);
        B = getTickCount();
		int C = B - A;
		if ((C>time&&start) || waitKey(30) >= 0)
		{
			start = true;
			cout << "time = " << C << endl;
            A = getTickCount();
			i++;
			Mat capture = frame.clone();
			Mat gray;
			cv::cvtColor(capture, gray, CV_BGR2GRAY);
			m_vCaptures.push_back(gray);
			if (++nPatterns >= m_nPatterns)
				break;
		};
	}
	cout << "Patrones capturados." << endl;
	cvDestroyWindow("Patrones");
	return true;
}
///////////////////////////////////////////////////////
// Panel::CalibrateCamera() Description
///////////////////////////////////////////////////////
void Panel::CalibrateCamera(string sFilePath)
{
	help();

	//! [file_read]
	Settings s;
	const string inputSettingsFile = sFilePath;
	FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings
	if (!fs.isOpened())
	{
		cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
//		return -1;
	}
	fs["Settings"] >> s;
	fs.release();                                         // close Settings file
	//! [file_read]

	//FileStorage fout("settings.yml", FileStorage::WRITE); // write config as YAML
	//fout << "Settings" << s;

	if (!s.goodInput)
	{
		cout << "Invalid input detected. Application stopping. " << endl;
//		return -1;
	}

	vector<vector<Point2f> > imagePoints;
	Mat cameraMatrix, distCoeffs;
	Size imageSize;
	int mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION;
	clock_t prevTimestamp = 0;
	const Scalar RED(0, 0, 255), GREEN(0, 255, 0);
	const char ESC_KEY = 27;
	int counter = 1;

	//! [get_input]
	for (;;)
	{
		Mat view;
		bool blinkOutput = false;

		view = s.nextImage();

		//-----  If no more image, or got enough, then stop calibration and show result -------------
		if (mode == CAPTURING && imagePoints.size() >= (size_t)s.nrFrames)
		{
			if (runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints))
				mode = CALIBRATED;
			else
				mode = DETECTION;
		}
		if (view.empty())          // If there are no more images stop the loop
		{
			// if calibration threshold was not reached yet, calibrate now
			if (mode != CALIBRATED && !imagePoints.empty())
				runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints);
			break;
		}
		//! [get_input]

		imageSize = view.size();  // Format input image.
		if (s.flipVertical)    flip(view, view, 0);

		//! [find_pattern]
		vector<Point2f> pointBuf;

		bool found;
		switch (s.calibrationPattern) // Find feature points on the input format
		{
		case Settings::CHESSBOARD:
			found = findChessboardCorners(view, s.boardSize, pointBuf,
				CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK | CALIB_CB_NORMALIZE_IMAGE);
			break;
		case Settings::CIRCLES_GRID:
			found = findCirclesGrid(view, s.boardSize, pointBuf);
			break;
		case Settings::ASYMMETRIC_CIRCLES_GRID:
			found = findCirclesGrid(view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID);
			break;
		default:
			found = false;
			break;
		}
		//! [find_pattern]
		//! [pattern_found]
		if (found)                // If done with success,
		{
			// improve the found corners' coordinate accuracy for chessboard
			if (s.calibrationPattern == Settings::CHESSBOARD)
			{
				Mat viewGray;
				cvtColor(view, viewGray, COLOR_BGR2GRAY);
				cornerSubPix(viewGray, pointBuf, Size(11, 11),
					Size(-1, -1), TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
			}

			if (mode == CAPTURING &&  // For camera only take new samples after delay time
				(!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC))
			{
				imagePoints.push_back(pointBuf);
				prevTimestamp = clock();
				blinkOutput = s.inputCapture.isOpened();
			}

			// Draw the corners.
			drawChessboardCorners(view, s.boardSize, Mat(pointBuf), found);
		}
		//! [pattern_found]
		//----------------------------- Output Text ------------------------------------------------
		//! [output_text]
		string msg = (mode == CAPTURING) ? "100/100" :
			mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
		int baseLine = 0;
		Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
		Point textOrigin(view.cols - 2 * textSize.width - 10, view.rows - 2 * baseLine - 10);

		if (mode == CAPTURING)
		{
			if (s.showUndistorsed)
				msg = format("%d/%d Undist", (int)imagePoints.size(), s.nrFrames);
			else
				msg = format("%d/%d", (int)imagePoints.size(), s.nrFrames);
		}

		putText(view, msg, textOrigin, 1, 1, mode == CALIBRATED ? GREEN : RED);

		if (blinkOutput)
			bitwise_not(view, view);
		//! [output_text]
		//------------------------- Video capture  output  undistorted ------------------------------
		//! [output_undistorted]
		if (mode == CALIBRATED && s.showUndistorsed)
		{
			Mat temp = view.clone();
			undistort(temp, view, cameraMatrix, distCoeffs);
		}
		//! [output_undistorted]
		//------------------------------ Show image and check for input commands -------------------
		//! [await_input]
		
		namedWindow("Image View" + to_string(counter), WINDOW_NORMAL);
		resizeWindow("Image View" + to_string(counter), 640, 480);
		imshow("Image View" + to_string(counter), view);
		char key = (char)waitKey(s.inputCapture.isOpened() ? 50 : s.delay);

		cout << "Image " << to_string(counter) << " Completed" << endl;
		counter++;

		if (key == ESC_KEY)
			break;

		if (key == 'u' && mode == CALIBRATED)
			s.showUndistorsed = !s.showUndistorsed;

		if (s.inputCapture.isOpened() && key == 'g')
		{
			mode = CAPTURING;
			imagePoints.clear();
		}
		//! [await_input]
	}

	// -----------------------Show the undistorted image for the image list ------------------------
	//! [show_results]
	if (s.inputType == Settings::IMAGE_LIST && s.showUndistorsed)
	{
		Mat view, rview, map1, map2;
		initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
			getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
			imageSize, CV_16SC2, map1, map2);

		m_mainMap1 = map1;
		m_mainMap2 = map2;

		for (size_t i = 0; i < s.imageList.size(); i++)
		{
			view = imread(s.imageList[i], 1);
			if (view.empty())
				continue;
			remap(view, rview, map1, map2, INTER_LINEAR);
			imshow("Image View", rview);
			char c = (char)waitKey();
			if (c == ESC_KEY || c == 'q' || c == 'Q')
				break;
		}
	}
	//! [show_results]

//	return 0;

}