Example no. 1
void DrawAxis::openWebcam()
{
	webcam.open(0);

	webcam.set(CV_CAP_PROP_FRAME_WIDTH, frameWidth);
	webcam.set(CV_CAP_PROP_FRAME_HEIGHT, frameHeight);
	rvec = Mat(Size(3, 1), CV_64F);
	tvec = Mat(Size(3, 1), CV_64F);
	cout << "intrinsinc = " << intrinsic_params << endl;
	cout << "dist = " << distortion_params << endl;
	cout << "intrinsic.size = " << intrinsic_params.size() << endl;
	while (true){
		webcam.read(webcamImage);
		bool findCorners = findChessboardCorners(webcamImage, boardSize, corners, CALIB_CB_FAST_CHECK);
		if (findCorners){
			solvePnP(Mat(boardPoints), Mat(corners), intrinsic_params, distortion_params, rvec, tvec, false);
			projectPoints(cubePoints, rvec, tvec, intrinsic_params, distortion_params, cubeFramePoints);
			projectPoints(framePoints, rvec, tvec, intrinsic_params, distortion_params, imageFramePoints);

			drawAxis(webcamImage, color, 3);
			drawCube(webcamImage, cubeFramePoints, Scalar(255, 0, 255), 2);

		}
		namedWindow("OpenCV Webcam", 0);
		imshow("OpenCV Webcam", webcamImage);

		waitKey(10);
	}
}
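The pose estimation above assumes that boardPoints (the 3D chessboard corners passed to solvePnP), framePoints (the 3D axis endpoints) and cubePoints have been filled in elsewhere in the class. A minimal sketch of how such object points are commonly generated follows; the setupObjectPoints name and the squareSize parameter are hypothetical and not part of the original class:

void DrawAxis::setupObjectPoints(float squareSize)
{
	// planar chessboard corners, z = 0, ordered row by row like the findChessboardCorners output
	boardPoints.clear();
	for (int i = 0; i < boardSize.height; ++i)
		for (int j = 0; j < boardSize.width; ++j)
			boardPoints.push_back(Point3f(j * squareSize, i * squareSize, 0));

	// axis endpoints: the board origin plus one point along each axis
	framePoints.clear();
	framePoints.push_back(Point3f(0, 0, 0));
	framePoints.push_back(Point3f(2 * squareSize, 0, 0));
	framePoints.push_back(Point3f(0, 2 * squareSize, 0));
	framePoints.push_back(Point3f(0, 0, -2 * squareSize));

	// cube of side 2*squareSize standing on the board plane
	float s = 2 * squareSize;
	Point3f base[4] = { Point3f(0, 0, 0), Point3f(s, 0, 0), Point3f(s, s, 0), Point3f(0, s, 0) };
	cubePoints.clear();
	for (int k = 0; k < 4; ++k) cubePoints.push_back(base[k]);
	for (int k = 0; k < 4; ++k) cubePoints.push_back(Point3f(base[k].x, base[k].y, -s));
}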
	bool Calibration::findBoard(Mat img, vector<Point2f>& pointBuf, bool refine) {
		bool found=false;
		if(patternType == CHESSBOARD) {
			// no CV_CALIB_CB_FAST_CHECK, because it breaks on dark images (e.g., dark IR images from kinect)
			int chessFlags = CV_CALIB_CB_ADAPTIVE_THRESH; // | CV_CALIB_CB_NORMALIZE_IMAGE;
			found = findChessboardCorners(img, patternSize, pointBuf, chessFlags);
			
			// improve corner accuracy
			if(found) {
				if(img.type() != CV_8UC1) {
					cvtColor(img, grayMat, CV_RGB2GRAY);
				} else {
					grayMat = img;
				}
				
				if(refine) {
					// the 11x11 dictates the smallest image space square size allowed
					// in other words, if your smallest square is 11x11 pixels, then set this to 11x11
					cornerSubPix(grayMat, pointBuf, subpixelSize,  cv::Size(-1,-1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1 ));
				}
			}
		}
//#ifdef USING_OPENCV_2_3
		else {
			int flags = (patternType == CIRCLES_GRID ? CALIB_CB_SYMMETRIC_GRID : CALIB_CB_ASYMMETRIC_GRID); // + CALIB_CB_CLUSTERING
			found = findCirclesGrid(img, patternSize, pointBuf, flags);
		}
//#endif
		return found;
	}
Example no. 3
void Camera::calibrate(vector<string> imageList, int boardWidth, int boardHeight) {
    vector<vector<Point2f> > imagePoints;
    Size boardSize, imageSize;
    boardSize.width=boardWidth;
    boardSize.height=boardHeight;
    int flags=0;
    int i;

    float squareSize = 1.f, aspectRatio = 1.f;

      Mat view, viewGray;

    for(i = 0; i<(int)imageList.size();i++)
    {

         view = cv::imread(imageList[i], 1);
         imageSize = view.size();
         vector<Point2f> pointbuf;
         cvtColor(view, viewGray, CV_BGR2GRAY); 

         bool found = findChessboardCorners( view, boardSize, pointbuf,
                                            CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);

         if(found) 
         {
            drawChessboardCorners( view, boardSize, Mat(pointbuf), found );
            imagePoints.push_back(pointbuf);
         }

    }
    prepareandRunCalibration(imagePoints, imageSize, boardSize, squareSize, aspectRatio, flags, K, Dist);


}
void stereoCalibThread::monoCalibration(const vector<string>& imageList, int boardWidth, int boardHeight, Mat &K, Mat &Dist)
{
    vector<vector<Point2f> > imagePoints;
    Size boardSize, imageSize;
    boardSize.width=boardWidth;
    boardSize.height=boardHeight;
    int flags=0;
    int i;

    float squareSize = 1.f, aspectRatio = 1.f;

      Mat view, viewGray;

    for(i = 0; i<(int)imageList.size();i++)
    {

         view = cv::imread(imageList[i], 1);
         imageSize = view.size();
         vector<Point2f> pointbuf;
         cvtColor(view, viewGray, CV_BGR2GRAY);

         bool found = false;
         if(boardType == "CIRCLES_GRID") {
             found = findCirclesGridDefault(view, boardSize, pointbuf, CALIB_CB_SYMMETRIC_GRID  | CALIB_CB_CLUSTERING);
         } else if(boardType == "ASYMMETRIC_CIRCLES_GRID") {
             found = findCirclesGridDefault(view, boardSize, pointbuf, CALIB_CB_ASYMMETRIC_GRID | CALIB_CB_CLUSTERING);
         } else {
             found = findChessboardCorners( view, boardSize, pointbuf,
                                            CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
         }

         if(found) 
         {
            drawChessboardCorners( view, boardSize, Mat(pointbuf), found );
            imagePoints.push_back(pointbuf);
         }

    }
    std::vector<Mat> rvecs, tvecs;
    std::vector<float> reprojErrs;
    double totalAvgErr = 0;

    K = Mat::eye(3, 3, CV_64F);
    if( flags & CV_CALIB_FIX_ASPECT_RATIO )
        K.at<double>(0,0) = aspectRatio;
    
    Dist = Mat::zeros(4, 1, CV_64F);
    
    std::vector<std::vector<Point3f> > objectPoints(1);
    calcChessboardCorners(boardSize, squareSize, objectPoints[0]);

    objectPoints.resize(imagePoints.size(),objectPoints[0]);
    
    double rms = calibrateCamera(objectPoints, imagePoints, imageSize, K,
                    Dist, rvecs, tvecs,CV_CALIB_FIX_K3);
    printf("RMS error reported by calibrateCamera: %g\n", rms);
    cout.flush();
}
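calcChessboardCorners above is the helper used by the standard OpenCV calibration sample; if it is not already available in the project, an equivalent version with the signature used above is only a few lines (sketch):

static void calcChessboardCorners(Size boardSize, float squareSize, std::vector<Point3f>& corners)
{
    // one 3D point per inner corner, laid out on the z = 0 plane, row by row
    corners.clear();
    for (int i = 0; i < boardSize.height; i++)
        for (int j = 0; j < boardSize.width; j++)
            corners.push_back(Point3f(float(j * squareSize), float(i * squareSize), 0));
}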
Example no. 5
bool Fiducial::find(Mat gray, bool accurate =false){
	found = findChessboardCorners(gray, size, corners,CV_CALIB_CB_ADAPTIVE_THRESH 
														+ CV_CALIB_CB_NORMALIZE_IMAGE 
														+ CV_CALIB_CB_FAST_CHECK);
	if(found && accurate){
	  cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
	}
	return found;
}
Example no. 6
	// Obtain the corner point coordinates in the correct order:
	bool Chessboard::getCornerGoodCoordinates(void)
	{
		// Find the board and return its corners
		bool found = findChessboardCorners(this->GreyPicture, this->BoardSize, this->CornerPoints,
			CV_CALIB_CB_ADAPTIVE_THRESH);  ///| CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
		// Error handling:
		if (!found)
		{
			return false;
		}
		else
		{
			// ----------------------------------------------------------------------------------
			// The corner points must be sorted, because they come back in an essentially arbitrary order.
			// Corner iteration: look for the minimum and maximum x and y coordinates among the corners:

			Point2f FirstPoint = CornerPoints[0];
			float MinY = 10000;
			float MaxY = 0;
			float MaxX = 0;
			float MinX = 10000;

			// Stepping through the corner points (!)
			std::vector<Point2f>::iterator it = CornerPoints.begin();
			for (int CornerIdx = 0; CornerIdx < 4; CornerIdx++)
			{
				// Select the extreme points to determine the board's orientation
				if (it->y > MaxY) MaxY = it->y;
				if (it->y < MinY) MinY = it->y;
				if (it->x > MaxX) MaxX = it->x;
				if (it->x < MinX) MinX = it->x;

				// Step to the next corner point:
				if (CornerIdx % 2 == 0) it += 8;
				else if (CornerIdx == 1) it += 64;

			}
			// Split the transformation of the detected corner points into 4 cases:
			// Set up the boundaries:
			float xBorder = MinX + ((MaxX - MinX) / 2);
			float yBorder = MinY + ((MaxY - MinY) / 2);

			if (FirstPoint.x <= xBorder && FirstPoint.y <= yBorder) transformCorners90();
			else if (FirstPoint.x < xBorder && FirstPoint.y > yBorder); // This case is already correct as-is
			else if (FirstPoint.x > xBorder && FirstPoint.y < yBorder) transformCorners180();
			else if (FirstPoint.x >= xBorder && FirstPoint.y >= yBorder) transformCorners270();
			else
			{
				cout << "Rohadt nagy error a sarkpont koordináta transzformációban!" << endl;
				return false;
			}

			return true;
		}
	}
Example no. 7
    void Corners(Size boardSize)
    {
        //namedWindow( "image left", WINDOW_AUTOSIZE );
        //namedWindow( "image right", WINDOW_AUTOSIZE );
        bool foundL = findChessboardCorners( left, boardSize, ptvecL, CV_CALIB_CB_ADAPTIVE_THRESH );
        bool foundR = findChessboardCorners( right, boardSize, ptvecR, CV_CALIB_CB_ADAPTIVE_THRESH );
        /*
        std::cout << "foundL = " << foundL << " foundR = " << foundR << std::endl;
        std::cout << " L " << ptvecL.size() << " R " << ptvecR.size() << std::endl;
        if(!foundL)
        {
            std::cout << "FoundL not true" << std::endl;
            rotate(left, -90, left);
            ptvecL.clear();
            bool foundL = findChessboardCorners( left, boardSize, ptvecL, CV_CALIB_CB_ADAPTIVE_THRESH );
            std::cout << "foundL = " << foundL << " L " << ptvecL.size() << std::endl;
        }
        else
        {
            std::cout << "foundL = " << foundL << " L " << ptvecL.size() << std::endl;
        }
        if(!foundR)
        {
            std::cout << "FoundR not true" << std::endl;
            flip(right,right,1);
            ptvecR.clear();
            foundR = findChessboardCorners( left, boardSize, ptvecL, CV_CALIB_CB_ADAPTIVE_THRESH );
            std::cout << "foundR = " << foundL << " R " << ptvecR.size() << std::endl;
        }
        else
        {
            std::cout << "foundR = " << foundR << " R " << ptvecR.size() << std::endl;
        }
        */
        drawChessboardCorners( left, boardSize,ptvecL, foundL );
        drawChessboardCorners( right, boardSize,ptvecR, foundR );
        //imshow("image left ",left);
        //imshow("image right ",right);

    }
Example no. 8
bool generate2DPointsFromCheesboard(Mat undistord_image, Size chess_height_width, std::vector<Point2f> &ptvec)
{

	//cvtColor(undistord_image,undistord_image,CV_RGB2GRAY);
	bool found = findChessboardCorners( undistord_image, chess_height_width, ptvec, CV_CALIB_CB_ADAPTIVE_THRESH );
	//cout << ptvec ;
	if (found)
		drawChessboardCorners(undistord_image,chess_height_width, ptvec, found) ;
	else cout<< "ERROR ; chessboard not found" << endl;
	//imshow("Cheesboard found", undistord_image);
	//waitKey(0); 
	return found ;	
}
bool Chessboard::findCorners(ofPixels &image, vector<ofVec2f> &points) const {
	int subPixSearch = image.getWidth() / squaresX / 10.0f;
	if (subPixSearch % 2 == 0)
		subPixSearch++; //ensure an odd window size
	
    int chessFlags = CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK;
    bool found;
	
	Mat img = toCv(image);
    
    found = findChessboardCorners(img, cv::Size(squaresX-1, squaresY-1), *(vector<Point2f>*)&points, chessFlags);
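    // note: the raw cast above relies on ofVec2f and cv::Point2f both being two
    // packed floats (x, y), so the two vector types share the same memory layout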
    
    return found;
}
Example no. 10
// Load the calibration images and extract the corner points
bool CSingleCalib::addChessBoardPoints(String *strImageName)
{
	cout << "CSingleCalib::addChessBoardPoints !" << endl;
	bool bRet = false;
	do
	{
		vector<Point2f> imageCorners;
		vector<Point3f> objectCorners;
		

		for (int i = 0; i < ChessBoardSize_h; ++i)
		{
			for (int j = 0; j < ChessBoardSize_w; ++j)
			{
				objectCorners.push_back(Point3f(i*SquareWidth, j*SquareWidth, 0));
			}
		}
		cv::Mat image;
		
		for (int i = 1; i <= NImage; ++i)
		{
			image = imread(*strImageName);
			
			cvtColor(image, grayImage, CV_BGR2GRAY);
			
			bool found = findChessboardCorners(image, ChessBoardSize, imageCorners);
			if (found)
			{
				cornerSubPix(grayImage, imageCorners, Size(5, 5), cv::Size(-1, -1),
							cv::TermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 0.1));
			}
			cout << i << endl;
			if (imageCorners.size() != ChessBoardSize.area())
			{
				cout << "检测的角点数和棋盘格本身的数目不等" << endl;
				break; 
			}
			
			addPoints(imageCorners, objectCorners);
			
			cv::drawChessboardCorners(image, ChessBoardSize, imageCorners, found);
			cv::imshow("BoardCorner", image);
			cv::waitKey(10);
			++strImageName;
		}
//		cvDestroyWindow("image");
		cvDestroyWindow("BoardCorner");
		bRet = true;
	} while (false);
	return bRet;
}
Example no. 11
std::vector<pf::Corners> getCornersSamples(size_t index) {

    cv::Size numSquares(NUM_HOR_SQUARES, NUM_VER_SQUARES);
    cv::VideoCapture capture(index + 1);

    if (!capture.isOpened()) {
        std::cerr << "Can't open the camera" << std::endl;
        std::exit(-1);
    }

    capture.set(CV_CAP_PROP_FPS, FRAMES_PER_SECOND);

    std::vector<pf::Corners> cornersSamples;
    bool started = false;
    clock_t time = 0;

    while (cornersSamples.size() < NUM_FRAMES) {

        // Capture frame
        cv::Mat frame;
        capture >> frame;

        // Find chessboard corners
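        // (this findChessboardCorners appears to be a project-local wrapper that
        //  returns a (corners, found) pair, not the single-return cv:: function)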
        auto found = findChessboardCorners(frame);

        if (found.second && started && clock() - time > DELAY_BETWEEN_FRAMES) {
            time = clock();
            cornersSamples.push_back(found.first);
            cv::bitwise_not(frame, frame);
        }

        // Show image
        cv::drawChessboardCorners(frame, numSquares, cv::Mat(found.first), found.second);
        cv::imshow("Calibrate", frame);

        // Wait for 's' to start
        if (cv::waitKey(100) == 's') {
            started = true;
        }
    }

    return cornersSamples;
}
void findCorner(vector<Point2f>&corners,vector<vector<Point2f>>&corners_Seq,vector<Mat>&image_Seq){

	cout<<"start find corners"<<endl; 
	int image_count = 25;
	Size board_size = Size(9,6);

	int successImageNum = 0;
	Mat image,imageGray;

	int count = 0;
	for(int i = 0 ;i<image_count;i++){
		image = imread(findFileName(i));
		cvtColor(image,imageGray,CV_RGB2GRAY);
		bool found = findChessboardCorners(image, board_size, corners,CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE+ 
                CALIB_CB_FAST_CHECK);
		if (!found){
			cout <<"not found corners!"<< endl;
			continue;
		}else{

			cornerSubPix(imageGray, corners, Size(11, 11), Size(-1, -1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
			Mat temp = image.clone();
			drawChessboardCorners( temp, board_size, Mat(corners), found );
			string imageFileName;
			std::stringstream StrStm;
			StrStm << i+1;
			StrStm >> imageFileName;
			imageFileName += "_corner.jpg";
			imwrite(imageFileName, temp);
			cout << "Frame corner#" << i+1 << "...end" << endl;

			count = count + corners.size();
			successImageNum = successImageNum + 1;
			corners_Seq.push_back(corners);
		}
		 image_Seq.push_back(image);			
	}
	cout << " corners find success"<< endl;


}
Example no. 13
	bool Calibration::findBoard(Mat img, vector<Point2f> &pointBuf, bool refine) {
		// no CV_CALIB_CB_FAST_CHECK, because it breaks on dark images (e.g., dark IR images from kinect)
		int chessFlags = CV_CALIB_CB_ADAPTIVE_THRESH;// | CV_CALIB_CB_NORMALIZE_IMAGE;
		bool found = findChessboardCorners(img, patternSize, pointBuf, chessFlags);
		
		// improve corner accuracy
		if(found) {
			if(img.type() != CV_8UC1) {
				cvtColor(img, grayMat, CV_RGB2GRAY);
			} else {
				grayMat = img;
			}
			
			// the 11x11 dictates the smallest image space square size allowed
			// in other words, if your smallest square is 11x11 pixels, then set this to 11x11
			cornerSubPix(grayMat, pointBuf, cv::Size(11, 11),  cv::Size(-1,-1), TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1 ));
			
			return true;
		} else {
			return false;
		}
	}
Example no. 14
//#include <opencv2/highgui/highgui.hpp>
//#include <opencv2/xfeatures2d/nonfree.hpp>
int main() {
  char key;
  int i = 0;
  int j = 0;
  bool boardFound = false;
  cv::Mat currFrame;
  std::vector<cv::Point2f> boardCorners;
  
  cv::namedWindow("Camera_Output", 1);

  //cv::VideoCapture capture(CV_CAP_ANY); //Capture using any camera connected to your system
  cv::VideoCapture capture(1); //Capture from camera index 1
  if(!capture.isOpened()) {  // check if we succeeded
    return -1;
  }

  while(1) { //Create infinite loop for live streaming
    capture >> currFrame;

    bool boardFound = findChessboardCorners(currFrame, cv::Size(5,7), boardCorners, CV_CALIB_CB_ADAPTIVE_THRESH );

    if (boardFound) {
      for(i=0, j=1; j<boardCorners.size(); ++i, ++j) {
        cv::line(currFrame, boardCorners[i], boardCorners[j], cv::Scalar(0, 255, 0), 4);
      }
    }
    
    cv::imshow("Camera_Output", currFrame);

    key = cv::waitKey(10); //Capture Keyboard stroke
    if ((char(key) == 27) || (char(key) == 'q')){
      break; //If you hit ESC key loop will break.
    }
  }
  return 0;
}
int StereoCameraCalibration::handleStereoCalibration(struct StereoCameraCalibration::CalibrationConfigStruct struct_calibrationConfig)
{
    //The capture for the image
    VideoCapture leftCameraCapture;
    VideoCapture rightCameraCapture;
    //Used to signify the capture portion is done


    int quitCapture = 0;


    int lastKey = -1;
    int key = -1;

    double totalAvgErr;
    double rmsErr;

    CalibrationStates currentCalibrationState = CALIBRATION_STATE_INIT;


    //A vector to hold the points found during calibration
    vector< vector<Point2f> >calibPoints[CAMERA_LOCATION_RIGHT+1];

    //How many frames we have
    int frameCount = 0;

    clock_t prevTimeStamp = clock();


    Mat currentLeftFrame;
    Mat currentRightFrame;
    Mat leftCaptureFrame;
    Mat rightCaptureFrame;

    Mat flippedLeftFrame;
    Mat flippedRightFrame;



    Mat cameraMatrix[2];
    Mat distCoeffs[2];
    Mat R, T, R1, R2, P1, P2, Q;
    Rect validRoi[2];
    Mat rmap[2][2];



    while(currentCalibrationState != CALIBRATION_STATE_DONE)
    {

        key = waitKey(33) & 0xff;
        switch(currentCalibrationState)
        {
        case CALIBRATION_STATE_INIT:
        {


            //Open the captures
            leftCameraCapture.open(struct_calibrationConfig.leftCameraId);
            rightCameraCapture.open(struct_calibrationConfig.rightCameraId);

            if(!(leftCameraCapture.set(CV_CAP_PROP_FPS, 30.0)))
            {
                cout << "Left frame rate set failed" << endl;
            }
            if(!(rightCameraCapture.set(CV_CAP_PROP_FPS, 30.0)))
            {
                cout << "Right frame rate set failed" << endl;


            }



            if(!(leftCameraCapture.set(CV_CAP_PROP_FRAME_WIDTH, 640)))
            {
                cout << "Left frame width set failed" << endl;
            }
            if(!(leftCameraCapture.set(CV_CAP_PROP_FRAME_HEIGHT, 480)))
            {
                cout << "Left frame height set failed" << endl;
            }


            if(!(rightCameraCapture.set(CV_CAP_PROP_FRAME_WIDTH, 640)))
            {
                cout << "Right frame width set failed" << endl;


            }
            if(!(rightCameraCapture.set(CV_CAP_PROP_FRAME_HEIGHT, 480)))
            {
                cout << "Right frame height set failed" << endl;


            }



            //Named window for calibration
            namedWindow("Current Left Calibration Image Raw", 1);
            namedWindow("Current Right Calibration Image Raw", 1);

            //Named window for calibration
            namedWindow("Current Left Calibration Image", 1);
            namedWindow("Current Right Calibration Image", 1);


            cout << "Starting calibration feature point capture." << endl;
            currentCalibrationState = CALIBRATION_STATE_SHOW_IMAGE_BEFORE_CAPTURE;

            break;
        }
        case CALIBRATION_STATE_SHOW_IMAGE_BEFORE_CAPTURE:
        {

            if(leftCameraCapture.isOpened() && rightCameraCapture.isOpened())
            {

                leftCameraCapture >> leftCaptureFrame;
                rightCameraCapture >> rightCaptureFrame;


                leftCaptureFrame.copyTo(currentLeftFrame);


                rightCaptureFrame.copyTo(currentRightFrame);


            }

            //cout << "Left Frame Size" <<currentLeftFrame.rows <<"x" << currentLeftFrame.cols << endl;
            //cout << "Right Frame Size" <<currentRightFrame.rows <<"x" << currentRightFrame.cols << endl;

            if(!currentLeftFrame.data)
            {
                cout << "No Frame Data from Left Camera" << endl;
                return 2;
            }

            if(!currentRightFrame.data)
            {
                cout << "No Frame Data from Right Camera" << endl;
                return 2;
            }



            //currentFrame.copyTo(flippedFrame);

            flip(currentLeftFrame, flippedLeftFrame,1);
            flip(currentRightFrame, flippedRightFrame,1);

            imshow("Current Left Calibration Image Raw", flippedLeftFrame);
            imshow("Current Right Calibration Image Raw", flippedRightFrame);


            if(key == 's' || key == ' ')
            {

                prevTimeStamp = clock();
                currentCalibrationState = CALIBRATION_STATE_IMAGE_CAPTURE;

            }
            if(key == 27)
            {

                currentCalibrationState =CALIBRATION_STATE_ABORT;

            }



            break;
        }
        case CALIBRATION_STATE_IMAGE_CAPTURE:
        {
            if(leftCameraCapture.isOpened() && rightCameraCapture.isOpened())
            {

                leftCameraCapture >> leftCaptureFrame;
                leftCaptureFrame.copyTo(currentLeftFrame);

                rightCameraCapture >> rightCaptureFrame;
                rightCaptureFrame.copyTo(currentRightFrame);


            }

            if(!currentLeftFrame.data)
            {
                cout << "No Frame Data from Left Camera" << endl;
                return 2;
            }

            if(!currentRightFrame.data)
            {
                cout << "No Frame Data from Right Camera" << endl;
                return 2;
            }

            Mat flippedLeftFrame;
            Mat flippedRightFrame;


            //currentFrame.copyTo(flippedFrame);

            flip(currentLeftFrame, flippedLeftFrame,1);
            flip(currentRightFrame, flippedRightFrame,1);

            imshow("Current Left Calibration Image Raw", flippedRightFrame);
            imshow("Current Right Calibration Image Raw", flippedLeftFrame);




            Mat currentFrameGray[CAMERA_LOCATION_RIGHT+1];

            cvtColor(currentLeftFrame,currentFrameGray[CAMERA_LOCATION_LEFT],CV_BGR2GRAY);
            cvtColor(currentRightFrame,currentFrameGray[CAMERA_LOCATION_RIGHT],CV_BGR2GRAY);


            vector<Point2f> currentFramePoints[CAMERA_LOCATION_RIGHT+1];

            bool foundPoints[CAMERA_LOCATION_RIGHT+1];

            //Find the corners of the Chessboard
            foundPoints[CAMERA_LOCATION_LEFT] = findChessboardCorners( currentFrameGray[CAMERA_LOCATION_LEFT], struct_calibrationConfig.boardSize, currentFramePoints[CAMERA_LOCATION_LEFT], CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
            foundPoints[CAMERA_LOCATION_RIGHT] = findChessboardCorners( currentFrameGray[CAMERA_LOCATION_RIGHT], struct_calibrationConfig.boardSize, currentFramePoints[CAMERA_LOCATION_RIGHT], CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);



            if(foundPoints[CAMERA_LOCATION_LEFT] || foundPoints[CAMERA_LOCATION_RIGHT])
            {
                if(foundPoints[CAMERA_LOCATION_LEFT])
                {


                    cornerSubPix( currentFrameGray[CAMERA_LOCATION_LEFT], currentFramePoints[CAMERA_LOCATION_LEFT], Size(5,5), Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));


                }
                if(foundPoints[CAMERA_LOCATION_RIGHT])
                {



                    cornerSubPix( currentFrameGray[CAMERA_LOCATION_RIGHT], currentFramePoints[CAMERA_LOCATION_RIGHT], Size(5,5), Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));


                }


                if(foundPoints[CAMERA_LOCATION_LEFT] && foundPoints[CAMERA_LOCATION_RIGHT])
                {
                    if(clock() - prevTimeStamp > struct_calibrationConfig.delay*1e-3*CLOCKS_PER_SEC)
                    {


                        prevTimeStamp = clock();
                        //blink = capture.isOpened();
                        bitwise_not(currentLeftFrame, currentLeftFrame);
                        bitwise_not(currentRightFrame, currentRightFrame);


                        calibPoints[CAMERA_LOCATION_LEFT].push_back(currentFramePoints[CAMERA_LOCATION_LEFT]);
                        calibPoints[CAMERA_LOCATION_RIGHT].push_back(currentFramePoints[CAMERA_LOCATION_RIGHT]);
                        frameCount++;


                    }
                }

                if(foundPoints[CAMERA_LOCATION_LEFT])
                {


                    drawChessboardCorners( currentLeftFrame, struct_calibrationConfig.boardSize, Mat(currentFramePoints[CAMERA_LOCATION_LEFT]), foundPoints[CAMERA_LOCATION_LEFT] );


                }
                if(foundPoints[CAMERA_LOCATION_RIGHT])
                {



                    drawChessboardCorners( currentRightFrame, struct_calibrationConfig.boardSize, Mat(currentFramePoints[CAMERA_LOCATION_RIGHT]), foundPoints[CAMERA_LOCATION_RIGHT] );


                }



                cout << "Good Frames: " << frameCount << endl;


                imshow("Current Left Calibration Image", currentLeftFrame);
                imshow("Current Right Calibration Image", currentRightFrame);


                struct_calibrationConfig.imageSize = currentLeftFrame.size();



            }
            else
            {
                imshow("Current Left Calibration Image", currentFrameGray[CAMERA_LOCATION_LEFT]);
                imshow("Current Right Calibration Image", currentFrameGray[CAMERA_LOCATION_RIGHT]);
            }


            if((key == 'd'  || key == 'c' || key == 'r' || key == ' ') && frameCount >= 15)
            {

                currentCalibrationState =CALIBRATION_STATE_RUN;

            }
            if(key==27)
            {

                currentCalibrationState =CALIBRATION_STATE_ABORT;

            }



            break;
        }
        case CALIBRATION_STATE_RUN:
        {
            bool ok = runStereoCalibration(calibPoints, struct_calibrationConfig.imageSize, struct_calibrationConfig.boardSize, struct_calibrationConfig.squareSize,
                                           struct_calibrationConfig.aspectRatio, struct_calibrationConfig.flags, cameraMatrix, distCoeffs,
                                           R, T, R1, P1, R2, P2, Q, validRoi, rmsErr, totalAvgErr);
            printf("%s. avg reprojection error = %.2f\n",
                   ok ? "Calibration succeeded" : "Calibration failed",
                   totalAvgErr);

            if(ok)
            {
                cout << "Moving to save option." << endl;
                //Precompute maps for cv::remap()
                initUndistortRectifyMap(cameraMatrix[CAMERA_LOCATION_LEFT], distCoeffs[CAMERA_LOCATION_LEFT], R1, P1, struct_calibrationConfig.imageSize, CV_16SC2, rmap[CAMERA_LOCATION_LEFT][CAMERA_REMAP_AXIS_X], rmap[CAMERA_LOCATION_LEFT][CAMERA_REMAP_AXIS_Y]);
                initUndistortRectifyMap(cameraMatrix[CAMERA_LOCATION_RIGHT], distCoeffs[CAMERA_LOCATION_RIGHT], R2, P2, struct_calibrationConfig.imageSize, CV_16SC2, rmap[CAMERA_LOCATION_RIGHT][CAMERA_REMAP_AXIS_X], rmap[CAMERA_LOCATION_RIGHT][CAMERA_REMAP_AXIS_Y]);



                currentCalibrationState =CALIBRATION_STATE_SAVE;

            }
            else
            {

                cout << "Moving to waiting for image capture." << endl;
                currentCalibrationState =CALIBRATION_STATE_SHOW_IMAGE_BEFORE_CAPTURE;

            }
            break;
        }
        case CALIBRATION_STATE_SAVE:
        {
            key = displayRemappedStereoImages(leftCameraCapture, rightCameraCapture,validRoi,struct_calibrationConfig.imageSize,rmap);

            bool ok = false;

            vector<vector<Point2f> > noPoints[2];
            if( key == 's' )
            {
                this->imageSize = struct_calibrationConfig.imageSize;

                this->boardSize = struct_calibrationConfig.boardSize;
                this->squareSize = struct_calibrationConfig.squareSize;
                this ->aspectRatio = struct_calibrationConfig.aspectRatio;
                this->flags = struct_calibrationConfig.flags;
                this->cameraMatrix[0] = cameraMatrix[0].clone();
                this->cameraMatrix[1] = cameraMatrix[1].clone();
                this->distCoeffs[0] = distCoeffs[0].clone();
                this->distCoeffs[1] = distCoeffs[1].clone();
                this->R = R.clone();
                this->T = T.clone();
                this->R1 = R1.clone();
                this->P1 = P1.clone();
                this->R2 = R2.clone();
                this->P2 = P2.clone();
                this->Q = Q.clone();
                this->validRoi[0] = validRoi[0];
                this->validRoi[1] = validRoi[1];
                this->rmsErr = rmsErr;

                this->imagePoints[0] = calibPoints[0];
                this->imagePoints[1] = calibPoints[1];
                this->totalAvgErr = totalAvgErr;


                saveStereoCameraParams( struct_calibrationConfig.outputFilename, struct_calibrationConfig.imageSize,
                                        struct_calibrationConfig.boardSize, struct_calibrationConfig.squareSize, struct_calibrationConfig.aspectRatio,
                                        struct_calibrationConfig.flags, cameraMatrix, distCoeffs,
                                        R, T, R1, P1, R2, P2, Q,validRoi,
                                        rmsErr,
                                        struct_calibrationConfig.writePoints ? calibPoints : noPoints,
                                        totalAvgErr );
                cout << "Stereo Calibration Data Saved" << endl;
                currentCalibrationState =CALIBRATION_STATE_COMPLETE;
            }

            if(key == 27)
            {

                cout << "Move to abort" << endl;
                currentCalibrationState =CALIBRATION_STATE_ABORT;
            }

            break;
        }
        case CALIBRATION_STATE_ABORT:
            cout << "Calibration Aborted" << endl;
            currentCalibrationState =CALIBRATION_STATE_COMPLETE;
            break;
        case CALIBRATION_STATE_COMPLETE:

            cout << "Calibration Completed" << endl;
            currentCalibrationState =CALIBRATION_STATE_DONE;
            break;
        default:
            break;

        }//switch
void stereoCalibThread::stereoCalibration(const vector<string>& imagelist, int boardWidth, int boardHeight,float sqsize)
{
    Size boardSize;
    boardSize.width=boardWidth;
    boardSize.height=boardHeight;
    if( imagelist.size() % 2 != 0 )
    {
        cout << "Error: the image list contains odd (non-even) number of elements\n";
        return;
    }
    
    const int maxScale = 2;
    // ARRAY AND VECTOR STORAGE:
    
    std::vector<std::vector<Point2f> > imagePoints[2];
    std::vector<std::vector<Point3f> > objectPoints;
    Size imageSize;
    
    int i, j, k, nimages = (int)imagelist.size()/2;
    
    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    std::vector<string> goodImageList;
    
    for( i = j = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            const string& filename = imagelist[i*2+k];
            Mat img = cv::imread(filename, 0);
            if(img.empty())
                break;
            if( imageSize == Size() )
                imageSize = img.size();
            else if( img.size() != imageSize )
            {
                cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
                break;
            }
            bool found = false;
            std::vector<Point2f>& corners = imagePoints[k][j];
            for( int scale = 1; scale <= maxScale; scale++ )
            {
                Mat timg;
                if( scale == 1 )
                    timg = img;
                else
                    resize(img, timg, Size(), scale, scale);

                if(boardType == "CIRCLES_GRID") {
                    found = findCirclesGridDefault(timg, boardSize, corners, CALIB_CB_SYMMETRIC_GRID  | CALIB_CB_CLUSTERING);
                } else if(boardType == "ASYMMETRIC_CIRCLES_GRID") {
                    found = findCirclesGridDefault(timg, boardSize, corners, CALIB_CB_ASYMMETRIC_GRID | CALIB_CB_CLUSTERING);
                } else {
                    found = findChessboardCorners(timg, boardSize, corners,
                                                  CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
                }

                if( found )
                {
                    if( scale > 1 )
                    {
                        Mat cornersMat(corners);
                        cornersMat *= 1./scale;
                    }
                    break;
                }
            }
            if( !found )
                break;
        }
        if( k == 2 )
        {
            goodImageList.push_back(imagelist[i*2]);
            goodImageList.push_back(imagelist[i*2+1]);
            j++;
        }
    }
    fprintf(stdout,"%i pairs have been successfully detected.\n",j);
    nimages = j;
    if( nimages < 2 )
    {
        fprintf(stdout,"Error: too few pairs detected \n");
        return;
    }
    
    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    objectPoints.resize(nimages);
    
    for( i = 0; i < nimages; i++ )
    {
        for( j = 0; j < boardSize.height; j++ )
            for( k = 0; k < boardSize.width; k++ )
                objectPoints[i].push_back(Point3f(j*squareSize, k*squareSize, 0));
    }
    
    fprintf(stdout,"Running stereo calibration ...\n");
    
    Mat cameraMatrix[2], distCoeffs[2];
    Mat E, F;
    
    if(this->Kleft.empty() || this->Kright.empty())
    {
        double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
                        this->Kleft, this->DistL,
                        this->Kright, this->DistR,
                        imageSize, this->R, this->T, E, F,
                        TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
                        CV_CALIB_FIX_ASPECT_RATIO +
                        CV_CALIB_ZERO_TANGENT_DIST +
                        CV_CALIB_SAME_FOCAL_LENGTH +
                        CV_CALIB_FIX_K3);
        fprintf(stdout,"done with RMS error= %f\n",rms);
    } else
    {
        double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
                this->Kleft, this->DistL,
                this->Kright, this->DistR,
                imageSize, this->R, this->T, E, F,
                TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),CV_CALIB_FIX_ASPECT_RATIO + CV_CALIB_FIX_INTRINSIC + CV_CALIB_FIX_K3);
        fprintf(stdout,"done with RMS error= %f\n",rms);
    }
// CALIBRATION QUALITY CHECK
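// (epipolar constraint m2^t*F*m1 = 0: the loop below sums, for every detected
//  corner, its distance to the epipolar line induced by the matching corner)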
    cameraMatrix[0] = this->Kleft;
    cameraMatrix[1] = this->Kright;
    distCoeffs[0]=this->DistL;
    distCoeffs[1]=this->DistR;
    Mat R, T;
    T=this->T;
    R=this->R;
    double err = 0;
    int npoints = 0;
    std::vector<Vec3f> lines[2];
    for( i = 0; i < nimages; i++ )
    {
        int npt = (int)imagePoints[0][i].size();
        Mat imgpt[2];
        for( k = 0; k < 2; k++ )
        {
            imgpt[k] = Mat(imagePoints[k][i]);
            undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
            computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]);
        }
        for( j = 0; j < npt; j++ )
        {
            double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
                                imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
                           fabs(imagePoints[1][i][j].x*lines[0][j][0] +
                                imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
            err += errij;
        }
        npoints += npt;
    }
    fprintf(stdout,"average reprojection err = %f\n",err/npoints);
    cout.flush();
}
Example no. 17
void Viewer::on_bt_intrinsic()
{


	tvStatus->get_buffer()->set_text("Getting intrinsics ...");

	Size boardSize, imageSize;
	Mat cameraMatrix, distCoeffs;
	vector<vector<Point2f> > imagePoints;


	// TODO: get this data from user by GUI
	boardSize.width = 9;
	boardSize.height= 6;

	//flag
	int flag = 0;
	flag |= CV_CALIB_FIX_PRINCIPAL_POINT;
	flag |= CV_CALIB_ZERO_TANGENT_DIST;
	flag |= CV_CALIB_FIX_ASPECT_RATIO;


	if ( !boost::filesystem::exists(pathImage))
	{
		std::cerr << "[E] Images calibration directory doesn't exist: " << pathImage << std::endl;
		return;
	}

	// List of images
	boost::filesystem::directory_iterator end_itr;
	for ( boost::filesystem::directory_iterator itr( pathImage ); itr != end_itr; ++itr )
	{

		if ( itr->path().generic_string().find("jpg") == std::string::npos )
			continue;

		std::cout << itr->path().generic_string() << std::endl;
		Mat view;
		view = imread(itr->path().c_str(), CV_LOAD_IMAGE_COLOR);

		imageSize = view.size();

		vector<Point2f> pointBuf;
		bool found = findChessboardCorners( view, boardSize, pointBuf,
				CV_CALIB_CB_ADAPTIVE_THRESH |
				CV_CALIB_CB_FAST_CHECK |
				CV_CALIB_CB_NORMALIZE_IMAGE);

		if (found)
		{

			Mat viewGray;
			cvtColor(view, viewGray, CV_BGR2GRAY);
			cornerSubPix( viewGray, pointBuf, Size(11,11),
					Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

			imagePoints.push_back(pointBuf);

			// Draw the corners.
			drawChessboardCorners( view, boardSize, Mat(pointBuf), found );
			//imshow("Image View", view);

		}
	}


	mCalibration->runCalibrationAndSave(boardSize, 20.0, flag, imageSize,
			cameraMatrix, distCoeffs, imagePoints);

	std::cout << std::endl << cameraMatrix << std::endl;

	std::stringstream matrixStr;

	matrixStr << "Intrinsic Matrix" << std::endl;
	matrixStr << "================" << std::endl << std::endl;

	for (int i=0; i<cameraMatrix.rows ; i++)
	{
		for (int j=0; j<cameraMatrix.cols; j++)
		{
			matrixStr << cameraMatrix.at<double>(i,j) << "\t\t";
			if (i==2 && j==0)
				matrixStr << "\t";
		}
		matrixStr<<std::endl;

		tvStatus->get_buffer()->set_text(matrixStr.str().c_str());
	}


}
 void stereoCalibThread::monoCalibRun()
{


    while(imagePortInLeft.getInputCount()==0 && imagePortInRight.getInputCount()==0)
    {
        fprintf(stdout, "Connect one camera.. \n");
        Time::delay(1.0);

        if(isStopping())
            return;

    }

    bool left= imagePortInLeft.getInputCount()>0?true:false;

    string cameraName;

    if(left)
        cameraName="LEFT";
    else
        cameraName="RIGHT";

    fprintf(stdout, "CALIBRATING %s CAMERA \n",cameraName.c_str());


    int count=1;
    Size boardSize, imageSize;
    boardSize.width=this->boardWidth;
    boardSize.height=this->boardHeight;



    while (!isStopping()) { 
       if(left)
            imageL = imagePortInLeft.read(false);
       else
            imageL = imagePortInRight.read(false);

       if(imageL!=NULL){
            bool foundL=false;
            mutex->wait();
            if(startCalibration>0) {

                string pathImg=imageDir;
                preparePath(pathImg.c_str(), pathL,pathR,count);
                string iml(pathL);
                imgL= (IplImage*) imageL->getIplImage();
                Mat Left(imgL);
                std::vector<Point2f> pointbufL;

                if(boardType == "CIRCLES_GRID") {
                    foundL = findCirclesGridDefault(Left, boardSize, pointbufL, CALIB_CB_SYMMETRIC_GRID  | CALIB_CB_CLUSTERING);
                } else if(boardType == "ASYMMETRIC_CIRCLES_GRID") {
                    foundL = findCirclesGridDefault(Left, boardSize, pointbufL, CALIB_CB_ASYMMETRIC_GRID | CALIB_CB_CLUSTERING);
                } else {
                    foundL = findChessboardCorners(Left, boardSize, pointbufL, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
                }

                if(foundL) {
                        cvCvtColor(imgL,imgL,CV_RGB2BGR);
                        saveImage(pathImg.c_str(),imgL,count);
                        imageListL.push_back(iml);
                        Mat cL(pointbufL);
                        drawChessboardCorners(Left, boardSize, cL, foundL);
                        count++;
                }

                if(count>numOfPairs) {
                    fprintf(stdout," Running %s Camera Calibration... \n", cameraName.c_str());
                    monoCalibration(imageListL,this->boardWidth,this->boardHeight,this->Kleft,this->DistL);

                    fprintf(stdout," Saving Calibration Results... \n");
                    if(left)
                        updateIntrinsics(imgL->width,imgL->height,Kleft.at<double>(0,0),Kleft.at<double>(1,1),Kleft.at<double>(0,2),Kleft.at<double>(1,2),DistL.at<double>(0,0),DistL.at<double>(0,1),DistL.at<double>(0,2),DistL.at<double>(0,3),"CAMERA_CALIBRATION_LEFT");
                    else
                        updateIntrinsics(imgL->width,imgL->height,Kleft.at<double>(0,0),Kleft.at<double>(1,1),Kleft.at<double>(0,2),Kleft.at<double>(1,2),DistL.at<double>(0,0),DistL.at<double>(0,1),DistL.at<double>(0,2),DistL.at<double>(0,3),"CAMERA_CALIBRATION_RIGHT");
                        
                    fprintf(stdout, "Calibration Results Saved in %s \n", camCalibFile.c_str());

                    startCalibration=0;
                    count=1;
                    imageListL.clear();
                }


            }
            mutex->post();
            ImageOf<PixelRgb>& outimL=outPortLeft.prepare();
            outimL=*imageL;
            outPortLeft.write();

            ImageOf<PixelRgb>& outimR=outPortRight.prepare();
            outimR=*imageL;
            outPortRight.write();

            if(foundL && startCalibration==1)
                Time::delay(2.0);
            cout.flush();

        }
   }


 }
void stereoCalibThread::stereoCalibRun()
{

    imageL=new ImageOf<PixelRgb>;
    imageR=new ImageOf<PixelRgb>;

    Stamp TSLeft;
    Stamp TSRight;

    bool initL=false;
    bool initR=false;

    int count=1;
    Size boardSize, imageSize;
    boardSize.width=this->boardWidth;
    boardSize.height=this->boardHeight;


   while (!isStopping()) { 
        ImageOf<PixelRgb> *tmpL = imagePortInLeft.read(false);
        ImageOf<PixelRgb> *tmpR = imagePortInRight.read(false);

        if(tmpL!=NULL)
        {
            *imageL=*tmpL;
            imagePortInLeft.getEnvelope(TSLeft);
            initL=true;
        }
        if(tmpR!=NULL) 
        {
            *imageR=*tmpR;
            imagePortInRight.getEnvelope(TSRight);
            initR=true;
        }

        if(initL && initR && checkTS(TSLeft.getTime(),TSRight.getTime())){

            bool foundL=false;
            bool foundR=false;

            mutex->wait();
            if(startCalibration>0) {

                string pathImg=imageDir;
                preparePath(pathImg.c_str(), pathL,pathR,count);
                string iml(pathL);
                string imr(pathR);
                imgL= (IplImage*) imageL->getIplImage();
                imgR= (IplImage*) imageR->getIplImage();
                Mat Left(imgL);
                Mat Right(imgR);


                std::vector<Point2f> pointbufL;
                std::vector<Point2f> pointbufR;
                if(boardType == "CIRCLES_GRID") {
                    foundL = findCirclesGridDefault(Left, boardSize, pointbufL, CALIB_CB_SYMMETRIC_GRID  | CALIB_CB_CLUSTERING);
                    foundR = findCirclesGridDefault(Right, boardSize, pointbufR, CALIB_CB_SYMMETRIC_GRID  | CALIB_CB_CLUSTERING);
                } else if(boardType == "ASYMMETRIC_CIRCLES_GRID") {
                    foundL = findCirclesGridDefault(Left, boardSize, pointbufL, CALIB_CB_ASYMMETRIC_GRID | CALIB_CB_CLUSTERING);
                    foundR = findCirclesGridDefault(Right, boardSize, pointbufR, CALIB_CB_ASYMMETRIC_GRID | CALIB_CB_CLUSTERING);
                } else {
                    foundL = findChessboardCorners( Left, boardSize, pointbufL, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
                    foundR = findChessboardCorners( Right, boardSize, pointbufR, CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
                }
                
                if(foundL && foundR) {
                        cvCvtColor(imgL,imgL,CV_RGB2BGR);
                        cvCvtColor(imgR,imgR, CV_RGB2BGR);
                        saveStereoImage(pathImg.c_str(),imgL,imgR,count);

                        imageListR.push_back(imr);
                        imageListL.push_back(iml);
                        imageListLR.push_back(iml);
                        imageListLR.push_back(imr);
                        Mat cL(pointbufL);
                        Mat cR(pointbufR);
                        drawChessboardCorners(Left, boardSize, cL, foundL);
                        drawChessboardCorners(Right, boardSize, cR, foundR);
                        count++;
                }

                if(count>numOfPairs) {
                    fprintf(stdout," Running Left Camera Calibration... \n");
                    monoCalibration(imageListL,this->boardWidth,this->boardHeight,this->Kleft,this->DistL);

                    fprintf(stdout," Running Right Camera Calibration... \n");
                    monoCalibration(imageListR,this->boardWidth,this->boardHeight,this->Kright,this->DistR);

                    stereoCalibration(imageListLR, this->boardWidth,this->boardHeight,this->squareSize);

                    fprintf(stdout," Saving Calibration Results... \n");
                    updateIntrinsics(imgL->width,imgL->height,Kright.at<double>(0,0),Kright.at<double>(1,1),Kright.at<double>(0,2),Kright.at<double>(1,2),DistR.at<double>(0,0),DistR.at<double>(0,1),DistR.at<double>(0,2),DistR.at<double>(0,3),"CAMERA_CALIBRATION_RIGHT");
                    updateIntrinsics(imgL->width,imgL->height,Kleft.at<double>(0,0),Kleft.at<double>(1,1),Kleft.at<double>(0,2),Kleft.at<double>(1,2),DistL.at<double>(0,0),DistL.at<double>(0,1),DistL.at<double>(0,2),DistL.at<double>(0,3),"CAMERA_CALIBRATION_LEFT");

                    Mat Rot=Mat::eye(3,3,CV_64FC1);
                    Mat Tr=Mat::zeros(3,1,CV_64FC1);

                    updateExtrinsics(this->R,this->T,"STEREO_DISPARITY");

                    fprintf(stdout, "Calibration Results Saved in %s \n", camCalibFile.c_str());

                    startCalibration=0;
                    count=1;
                    imageListR.clear();
                    imageListL.clear();
                    imageListLR.clear();
                }


            }
            mutex->post();
            ImageOf<PixelRgb>& outimL=outPortLeft.prepare();
            outimL=*imageL;
            outPortLeft.write();

            ImageOf<PixelRgb>& outimR=outPortRight.prepare();
            outimR=*imageR;
            outPortRight.write();

            if(foundL && foundR && startCalibration==1)
                Time::delay(2.0);
            initL=initR=false;
            cout.flush();
        }
   }

   delete imageL;
   delete imageR;
 }
/// it has multithread support under c++ 11
void CameraCalibration::getImagesAndFindPatterns(const string &cameraName)
{
	// set mode
	mode = CAPTURING;
	frameCounter = 0;
	imageCounter = 0;
	capturedFrame currentImage;

	// read current path
	currentPath = boost::filesystem::current_path();
	resultsPath = currentPath;
	cout << "current Results Path" << currentPath << '\n' << endl;	
	
	//mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION;// check enum type
	
	// Capture only the frames settled in the configuration file 
	// in the original code it was a endless loop
	for (int i = 0;; ++i)
	{

		Mat view,currentView;
		bool blinkOutput = false;
		
		// capture the image
		view = s.nextImage();
		frameCounter = frameCounter + 1;
		
		
		//------------------------- Show original distorted image -----------------------------------

		Mat originalView = view.clone();
		//imshow("original Image", originalView);

		//-----  If no more image, or got enough, then stop calibration and show result -------------
		if (mode == CAPTURING && imagePoints.size() >= (unsigned)s.nrFrames)
		{
			if (runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints))
				mode = CALIBRATED;
			else
				mode = DETECTION;
		}
		if (view.empty())          // If no more images then run calibration, save and stop loop.
		{
			if (imagePoints.size() > 0)
				runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints);
			break;
		}

		
		imageSize = view.size();  // Format input image.
		if (s.flipVertical)    flip(view, view, 0);

		vector<Point2f> pointBuf;

		bool found;
		switch (s.calibrationPattern) // Find feature points on the input format
		{
		case Settings::CHESSBOARD:
			found = findChessboardCorners(view, s.boardSize, pointBuf,
				CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK | CALIB_CB_NORMALIZE_IMAGE);
			break;
		case Settings::CIRCLES_GRID:
			found = findCirclesGrid(view, s.boardSize, pointBuf);
			break;
		case Settings::ASYMMETRIC_CIRCLES_GRID:
			found = findCirclesGrid(view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID);
			break;
		default:
			found = false;
			break;
		}

		if (found)                // If done with success,
		{
			// improve the found corners' coordinate accuracy for chessboard
			if (s.calibrationPattern == Settings::CHESSBOARD)
			{
				Mat viewGray;
				cvtColor(view, viewGray, COLOR_BGR2GRAY);
				cornerSubPix(viewGray, pointBuf, Size(11, 11),
					Size(-1, -1), TermCriteria(TermCriteria::EPS+TermCriteria::MAX_ITER, 30, 0.1));
			}

			if (mode == CAPTURING &&  // For camera only take new samples after delay time
				(!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC))
			{
				imagePoints.push_back(pointBuf);
				prevTimestamp = clock();
				blinkOutput = s.inputCapture.isOpened();
				circlePoints = pointBuf;	// save ordered circle points
			}

			// Draw the corners with ID according order criteria distance(x+y) from origin point 1
			savedImage = view.clone();
			drawChessboardCorners(view, s.boardSize, Mat(pointBuf), found);

			// order the points according to d(x+y) from the upper left corner that is used as the origin frame
			std::sort(pointBuf.begin(), pointBuf.end(), [](const cv::Point2f &a, const cv::Point2f &b)
						{return ((a.x + a.y) < (b.x + b.y)); });
			
			int pointCounter = 1;
			for (auto k:pointBuf){
						
				cv::putText(view,std::to_string(pointCounter),cv::Point(k.x,k.y),cv::FONT_HERSHEY_PLAIN,1.0,cv::Scalar(255,0,0),1);
				pointCounter = pointCounter + 1;
			}			
		}

		//----------------------------- Output Text ------------------------------------------------		

		string msg = (mode == CAPTURING) ? "100/100" :
			mode == CALIBRATED ? "Calibrated" : "the images are being captured";
		int baseLine = 0;
		Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
		Point textOrigin(view.cols - 2 * textSize.width - 10, view.rows - 2 * baseLine - 10);

		if (mode == CAPTURING)
		{
			if (s.showUndistorsed)
				msg = format("%d/%d Undist", (int)imagePoints.size(), s.nrFrames);
			else
				msg = format("%d/%d", (int)imagePoints.size(), s.nrFrames);
		}

		putText(view, msg, textOrigin, 1, 1, mode == CALIBRATED ? GREEN : RED);

		if (blinkOutput){

			bitwise_not(view, view);

			// save the image used for calibration to disk
			imageCounter = imageCounter + 1;
			string pathToFile;
			string filename;
			if (imageCounter <= s.nrFrames)
			{

				string imageName{ "Image" + string(std::to_string(imageCounter)) + cameraName + ".jpg" };
				boost::filesystem::path p{ "/" };	// add a slash to generate a portable string
				filename.assign(resultsPath.string() + p.generic_string() + imageName);

				vector<int> compression_params;
				compression_params.push_back(IMWRITE_JPEG_QUALITY);
				compression_params.push_back(100);
				
				// check if the file already exist? if yes, erase it
				bool found = getPathForThisFile(imageName,pathToFile);
				if (found){
					boost::filesystem::remove(pathToFile);
				}
				// write the new version of the file
				cv::imwrite(filename, savedImage);
				
				// save the points use to estimate the scale factor
				int currentBufferPosition = imagePoints.size();

				int pointCounter = 1;
				circlePatternInfo currentCircle;
				circlesDataPerImage dataCurrentImage;
				vector<circlePatternInfo> circlesInfoFromThisImage;
				for (auto k : circlePoints){

					currentCircle.circleID = pointCounter;
					currentCircle.circlePosition = cv::Point2f(k.x, k.y);
					currentCircle.circle3DPosition = circle_Mis_Positions.at(pointCounter - 1);
					circlesInfoFromThisImage.push_back(currentCircle);
					pointCounter = pointCounter + 1;
				}
				circlePoints.clear();
				dataCurrentImage.imageID= imageCounter;
				dataCurrentImage.cameraID = s.cameraID;
				dataCurrentImage.circlesData = circlesInfoFromThisImage;

				// save all the data from the asymetric circles pattern
				DataFromCirclesPattern.push_back(dataCurrentImage);
				
			}
		}
		
		//------------------------- Video capture  output  undistorted ------------------------------
		if (mode == CALIBRATED && s.showUndistorsed)
		{
			Mat temp = view.clone();
			undistort(temp, view, cameraMatrix, distCoeffs);
			string msgEsckey = "Press 'esc' key to quit";
			putText(view, msgEsckey, textOrigin, 1, 1, GREEN, 2);			

		}

		//------------------------------ Show image and check for input commands -------------------
		imshow(cameraName, view);

		//delay 30ms so that screen can refresh.
		//image will not appear without this waitKey() command
		cv::waitKey(30);

		char c = waitKey(1);

		if (c == ESC_KEY)    //  Escape key
			break;			// Breaks the capture loop

	}

}
Example no. 21
void StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=true, bool showRectified=true)
{
    if( imagelist.size() % 2 != 0 )
    {
        cout << "Error: the image list contains odd (non-even) number of elements\n";
        return;
    }
    printf("board size: %d x %d", boardSize.width, boardSize.height);
    bool displayCorners = true;
    const int maxScale = 2;
    const float squareSize = 1.f;  // Set this to your actual square size
    // ARRAY AND VECTOR STORAGE:

    vector<vector<Point2f> > imagePoints[2];
    vector<vector<Point3f> > objectPoints;
    Size imageSize;

    int i, j, k, nimages = (int)imagelist.size()/2;

    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    vector<string> goodImageList;

    for( i = j = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            const string& filename = imagelist[i*2+k];
            Mat img = imread(filename, 0);
            if(img.empty())
                break;
            if( imageSize == Size() )
                imageSize = img.size();
            else if( img.size() != imageSize )
            {
                cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
                break;
            }
            bool found = false;
            vector<Point2f>& corners = imagePoints[k][j];
            for( int scale = 1; scale <= maxScale; scale++ )
            {
                Mat timg;
                if( scale == 1 )
                    timg = img;
                else
                    resize(img, timg, Size(), scale, scale);
                found = findChessboardCorners(timg, boardSize, corners,
                    CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
                if( found )
                {
                    if( scale > 1 )
                    {
                        Mat cornersMat(corners);
                        cornersMat *= 1./scale;
                    }
                    break;
                }
            }
            if( displayCorners )
            {
                cout << filename << endl;
                Mat cimg, cimg1;
                cvtColor(img, cimg, CV_GRAY2BGR);
                drawChessboardCorners(cimg, boardSize, corners, found);
                double sf = 640./MAX(img.rows, img.cols);
                resize(cimg, cimg1, Size(), sf, sf);
                imshow("corners", cimg1);
                char c = (char)waitKey(500);
                if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
                    exit(-1);
            }
            else
                putchar('.');
            if( !found )
                break;
            cornerSubPix(img, corners, Size(11,11), Size(-1,-1),
                         TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,
                                      30, 0.01));
        }
        if( k == 2 )
        {
            goodImageList.push_back(imagelist[i*2]);
            goodImageList.push_back(imagelist[i*2+1]);
            j++;
        }
    }
    cout << j << " pairs have been successfully detected.\n";
    nimages = j;
    if( nimages < 2 )
    {
        cout << "Error: too little pairs to run the calibration\n";
        return;
    }

    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    objectPoints.resize(nimages);

    for( i = 0; i < nimages; i++ )
    {
        for( j = 0; j < boardSize.height; j++ )
            for( k = 0; k < boardSize.width; k++ )
                objectPoints[i].push_back(Point3f(j*squareSize, k*squareSize, 0));
    }

    cout << "Running stereo calibration ...\n";

    Mat cameraMatrix[2], distCoeffs[2];
    cameraMatrix[0] = Mat::eye(3, 3, CV_64F);
    cameraMatrix[1] = Mat::eye(3, 3, CV_64F);
    Mat R, T, E, F;

    double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
                    cameraMatrix[0], distCoeffs[0],
                    cameraMatrix[1], distCoeffs[1],
                    imageSize, R, T, E, F,
                    TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
                    CV_CALIB_FIX_ASPECT_RATIO +
                    CV_CALIB_ZERO_TANGENT_DIST +
                    //CV_CALIB_SAME_FOCAL_LENGTH +
                    CV_CALIB_RATIONAL_MODEL +
                    CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5);
    cout << "done with RMS error=" << rms << endl;

// CALIBRATION QUALITY CHECK
// because the output fundamental matrix implicitly
// includes all the output information,
// we can check the quality of calibration using the
// epipolar geometry constraint: m2^t*F*m1=0
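// For each corner pair (m1, m2), l2 = F*m1 is the epipolar line of m1 in the
// second image; computeCorrespondEpilines() returns line coefficients normalized
// so that a^2 + b^2 = 1, so the |a*x + b*y + c| terms below are geometric
// point-to-line distances in pixels, accumulated symmetrically for both views.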
    double err = 0;
    int npoints = 0;
    vector<Vec3f> lines[2];
    for( i = 0; i < nimages; i++ )
    {
        int npt = (int)imagePoints[0][i].size();
        Mat imgpt[2];
        for( k = 0; k < 2; k++ )
        {
            imgpt[k] = Mat(imagePoints[k][i]);
            undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
            computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]);
        }
        for( j = 0; j < npt; j++ )
        {
            double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
                                imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
                           fabs(imagePoints[1][i][j].x*lines[0][j][0] +
                                imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
            err += errij;
        }
        npoints += npt;
    }
    cout << "average reprojection err = " <<  err/npoints << endl;

    // save intrinsic parameters
    FileStorage fs("calib/intrinsics.yml", CV_STORAGE_WRITE);
    if( fs.isOpened() )
    {
        fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
            "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
        fs.release();
    }
    else
        cout << "Error: can not save the intrinsic parameters\n";

    Mat R1, R2, P1, P2, Q;
    Rect validRoi[2];

    stereoRectify(cameraMatrix[0], distCoeffs[0],
                  cameraMatrix[1], distCoeffs[1],
                  imageSize, R, T, R1, R2, P1, P2, Q,
                  CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);

    fs.open("calib/extrinsics.yml", CV_STORAGE_WRITE);
    if( fs.isOpened() )
    {
        fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
        fs.release();
    }
    else
        cout << "Error: can not save the intrinsic parameters\n";

    // OpenCV can handle left-right
    // or up-down camera arrangements
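    // After stereoRectify(), the fourth column of P2 encodes the baseline times
    // the focal length along the rectified axis: element (0,3) for a horizontal
    // (left-right) rig and element (1,3) for a vertical (up-down) rig, so the
    // larger magnitude tells which arrangement this is.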
    bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));

// COMPUTE AND DISPLAY RECTIFICATION
    if( !showRectified )
        return;

    Mat rmap[2][2];
// IF BY CALIBRATED (BOUGUET'S METHOD)
    if( useCalibrated )
    {
        // we already computed everything
    }
// OR ELSE HARTLEY'S METHOD
    else
 // use intrinsic parameters of each camera, but
 // compute the rectification transformation directly
 // from the fundamental matrix
    {
        vector<Point2f> allimgpt[2];
        for( k = 0; k < 2; k++ )
        {
            for( i = 0; i < nimages; i++ )
                std::copy(imagePoints[k][i].begin(), imagePoints[k][i].end(), back_inserter(allimgpt[k]));
        }
        F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
        Mat H1, H2;
        stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3);

        R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
        R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
        P1 = cameraMatrix[0];
        P2 = cameraMatrix[1];
    }

    //Precompute maps for cv::remap()
    initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);

    Mat canvas;
    double sf;
    int w, h;
    if( !isVerticalStereo )
    {
        sf = 600./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h, w*2, CV_8UC3);
    }
    else
    {
        sf = 300./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h*2, w, CV_8UC3);
    }

    for( i = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
            remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR);
            cvtColor(rimg, cimg, CV_GRAY2BGR);
            Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
            resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);
            if( useCalibrated )
            {
                Rect vroi(cvRound(validRoi[k].x*sf), cvRound(validRoi[k].y*sf),
                          cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf));
                rectangle(canvasPart, vroi, Scalar(0,0,255), 3, 8);
            }
        }

        if( !isVerticalStereo )
            for( j = 0; j < canvas.rows; j += 16 )
                line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
        else
            for( j = 0; j < canvas.cols; j += 16 )
                line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
        imshow("rectified", canvas);
        char c = (char)waitKey();
        if( c == 27 || c == 'q' || c == 'Q' )
            break;
    }
}
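
A minimal calling sketch for the function above, assuming StereoCalib() and the OpenCV headers are in scope; the file names, pair count, and 9x6 board size are placeholders to adapt to your capture session. StereoCalib() expects the list interleaved as left0, right0, left1, right1, ..., which is why an odd-length list is rejected.

#include <opencv2/opencv.hpp>
#include <cstdio>
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> imagelist;
    const int nPairs = 10;                        // hypothetical number of stereo pairs
    char name[64];
    for (int i = 0; i < nPairs; i++)
    {
        sprintf(name, "left%02d.jpg", i);         // placeholder naming scheme
        imagelist.push_back(name);
        sprintf(name, "right%02d.jpg", i);
        imagelist.push_back(name);
    }
    StereoCalib(imagelist, cv::Size(9, 6), true, true);   // 9x6 inner corners assumed
    return 0;
}
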
Esempio n. 22
0
void CalibrateThread::run()
{
    Size boardSize, imageSize;
    float squareSize = 1.f, aspectRatio = 1.f;
    Mat cameraMatrix, distCoeffs;
    //QString of = ui->lineEdit_WorkFolder->text() + '/' + ui->lineEdit_OutputName->text();
    QByteArray ba = strFileName.toLatin1();
    const char* outputFilename = ba.data();

    int i, nframes = 0;
    bool writeExtrinsics = true, writePoints = true;
    bool undistortImage = false;
    int flags = 0;
    VideoCapture capture;
    bool flipVertical = false;
    bool showUndistorted = false;

    int delay = 1000;
    clock_t prevTimestamp = 0;
    int mode = CAPTURING;
    vector<vector<Point2f> > imagePoints;
    vector<string> imageList;
    Pattern pattern = CHESSBOARD;

    boardSize.width = m_width;
    boardSize.height = m_height;
    squareSize = m_squaresize;



    //ui->textEdit_Information->append("\nCalibrating... Please wait for a while\n");


    if( imgList.size() == 0  )
    {
        //QMessageBox::warning(NULL, "Error", "Please choose a right folder");
        emit popupErrorInformation("Please choose a right folder");
        emit closeImageWindow();
        return;
    }
    else
    {
        nframes = imgList.size();
    }

    emit appendText("\nCalibrating... Please wait for a while\n");
    //namedWindow( "Image View", 1 );
    //bDialog->show();

    for(i = 0; i < nframes ;i++)
    {
        //ui->textEdit_Information->append("Processing the image No. " + QString::number(i + 1));


        emit appendText("Processing the image No. " + QString::number(i + 1));
        Mat view, viewGray;
        bool blink = false;
        qDebug("%s", imgList.at(i).toLatin1().data());
        if( i < (int)imgList.size() )
            view = imread(imgList.at(i).toLatin1().data(), 1);

        if(!view.data)
        {
            //QMessageBox::warning(NULL, "Error", );
            emit popupErrorInformation("Could not open image files");
            return;
        }

        imageSize = view.size();

        if( flipVertical )
            flip( view, view, 0 );

        vector<Point2f> pointbuf;
        cvtColor(view, viewGray, CV_BGR2GRAY);

        bool found;
        switch( pattern )
        {
        case CHESSBOARD:
            found = findChessboardCorners( view, boardSize, pointbuf,
                                           CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
            break;
        case CIRCLES_GRID:
            found = findCirclesGrid( view, boardSize, pointbuf );
            break;
        case ASYMMETRIC_CIRCLES_GRID:
            found = findCirclesGrid( view, boardSize, pointbuf, CALIB_CB_ASYMMETRIC_GRID );
            break;
        }

        // improve the found corners' coordinate accuracy
        if( pattern == CHESSBOARD && found) cornerSubPix( viewGray, pointbuf, Size(11,11),
                                                          Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

        if( mode == CAPTURING && found &&
                (!capture.isOpened() || clock() - prevTimestamp > delay*1e-3*CLOCKS_PER_SEC) )
        {
            imagePoints.push_back(pointbuf);
            prevTimestamp = clock();
            blink = capture.isOpened();
        }
        if(found)
            drawChessboardCorners( view, boardSize, Mat(pointbuf), found );

        string msg = mode == CAPTURING ? "100/100" :
                                         mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
        int baseLine = 0;
        Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
        Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);

        if( mode == CAPTURING )
        {
            if(undistortImage)
                msg = format( "%d/%d Undist", (int)imagePoints.size(), nframes );
            else
                msg = format( "%d/%d", (int)imagePoints.size(), nframes );
        }

        putText( view, msg, textOrigin, 1, 1,
                 mode != CALIBRATED ? Scalar(0,0,255) : Scalar(0,255,0));

        if( blink )
            bitwise_not(view, view);

        if( mode == CALIBRATED && undistortImage )
        {
            Mat temp = view.clone();
            undistort(temp, view, cameraMatrix, distCoeffs);
        }

        Mat rgb;
        cvtColor(view, rgb, CV_BGR2RGB);

        QImage  image32 = QImage(rgb.cols, rgb.rows, QImage::Format_RGB32);
        QRgb value;
        for(int r = 0; r < rgb.rows; r++)
        {
            for(int c = 0; c < rgb.cols; c++)
            {
                const uchar* p = rgb.ptr<uchar>(r) + c * 3;   // pixel (r, c) of the RGB image
                value = qRgb(p[0], p[1], p[2]);
                image32.setPixel(c, r, value);
            }
        }


        emit showBitmap(image32);

        int key;
        if(i < nframes - 1)
        {
            key = 0xff & waitKey(500);
        }
        else
        {
            key = waitKey(500);
        }

        if( (key & 255) == 27 )
            break;

        if( key == 'u' && mode == CALIBRATED )
            undistortImage = !undistortImage;




    }
    if(imagePoints.size() > 0)
    {
        emit appendText("\n" + QString::number(imagePoints.size()) + " out of " + QString::number(nframes) + " images are effective!\n" );
        runAndSave(outputFilename, imagePoints, imageSize,
                   boardSize, pattern, squareSize, aspectRatio,
                   flags, cameraMatrix, distCoeffs,
                   writeExtrinsics, writePoints);

    }
    else
    {
        emit appendText("Calibrating is not successful! \nPlease change the parameters and try again!");
        emit popupErrorInformation("Sorry, no enough points are detected! Please try another group of images!");
        emit closeImageWindow();
        return;

    }


    emit appendText("Calibrating Successfully! \nPlease go to the folder to check the out put files!");
    emit closeImageWindow();
    if( !capture.isOpened() && showUndistorted )
    {
        Mat view, rview, map1, map2;
        initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
                                getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
                                imageSize, CV_16SC2, map1, map2);

        for( i = 0; i < (int)imageList.size(); i++ )
        {
            view = imread(imageList[i], 1);
            if(!view.data)
                continue;
            //undistort( view, rview, cameraMatrix, distCoeffs, cameraMatrix );
            remap(view, rview, map1, map2, INTER_LINEAR);
            imshow("Image View", rview);
            int c = 0xff & waitKey();
            if( (c & 255) == 27 || c == 'q' || c == 'Q' )
                break;
        }
    }

    return;
}
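
As a side note, the per-pixel qRgb() loop above can normally be replaced by wrapping the converted RGB buffer directly; the sketch below assumes Qt's QImage(const uchar*, width, height, bytesPerLine, format) constructor with Format_RGB888, and calls copy() so the result outlives the temporary Mat.

#include <opencv2/opencv.hpp>
#include <QImage>

// Hypothetical helper: convert a BGR cv::Mat into a deep-copied QImage.
static QImage matToQImage(const cv::Mat& bgr)
{
    cv::Mat rgb;
    cv::cvtColor(bgr, rgb, CV_BGR2RGB);
    // Wrap the buffer without copying, then copy() so the QImage owns its
    // pixels after rgb goes out of scope.
    return QImage(rgb.data, rgb.cols, rgb.rows, (int)rgb.step, QImage::Format_RGB888).copy();
}
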
void CameraCalibration::StereoCalibration()
{

	vector<vector<Point2f> > ImagePoints[2];

	vector<vector<Point3f> > ObjectPoints(1);
	for(int i=0; i<BoardSize.height; i++)
		for(int j=0; j<BoardSize.width; j++)
			ObjectPoints.at(0).push_back(Point3f(float( j*SquareSize ),
			float( i*SquareSize ),
			0));

	ObjectPoints.resize(NumFrames, ObjectPoints[0]);

	vector<Mat> RVecs[2], TVecs[2];
	double rms;

	for(int c_idx=0; c_idx<2; c_idx++)
	{
		for(int i=0; i < NumFrames; i++)
		{

			Mat img = imread(data_path+"/"+calib_params[c_idx].ImageList.at(i), CV_LOAD_IMAGE_COLOR);

			vector<Point2f> pointBuf;
			bool found = false;

			found = findChessboardCorners(img, BoardSize, pointBuf,
				CV_CALIB_CB_ADAPTIVE_THRESH |
				CV_CALIB_CB_NORMALIZE_IMAGE);

			if(found)
			{
				Mat viewGray;
				cvtColor(img, viewGray, CV_BGR2GRAY);
				cornerSubPix(viewGray, pointBuf, Size(11, 11), Size(-1, -1),
					TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 100, 0.01));

				//drawChessboardCorners(img, BoardSize, Mat(pointBuf), found);
				//namedWindow("Image View", CV_WINDOW_AUTOSIZE);
				//imshow("Image View", img);
				//waitKey();
			}
			else
			{
				cerr << i << "th image cannot be found a pattern." << endl;
				exit(EXIT_FAILURE);
			}

			ImagePoints[c_idx].push_back(pointBuf);
		}

		calib_params[c_idx].DistCoeffs = Mat::zeros(8, 1, CV_64F);
		calib_params[c_idx].CameraMatrix = initCameraMatrix2D(ObjectPoints, ImagePoints[c_idx], ImageSize, 0);
		rms = calibrateCamera(ObjectPoints, 
			ImagePoints[c_idx],
			ImageSize,
			calib_params[c_idx].CameraMatrix,
			calib_params[c_idx].DistCoeffs,
			RVecs[c_idx],
			TVecs[c_idx],
			CV_CALIB_USE_INTRINSIC_GUESS |
			CV_CALIB_FIX_K3 |	
			CV_CALIB_FIX_K4 |
			CV_CALIB_FIX_K5 |
			CV_CALIB_FIX_K6);

		cout << c_idx << " camera re-projection error reported by calibrateCamera: "<< rms << endl;

	}

	rms = stereoCalibrate(ObjectPoints,
						  ImagePoints[0],
						  ImagePoints[1],
						  calib_params[0].CameraMatrix,
						  calib_params[0].DistCoeffs,
						  calib_params[1].CameraMatrix,
						  calib_params[1].DistCoeffs,
						  ImageSize,
						  stereo_params->R,
						  stereo_params->T,
						  stereo_params->E,
						  stereo_params->F,
						  TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5));

	cout << "Stereo re-projection error reported by stereoCalibrate: " << rms << endl;

	cout << "Fundamental Matrix reprojection error: " << FundamentalMatrixQuality(ImagePoints[0], ImagePoints[1], 
		calib_params[0].CameraMatrix, calib_params[1].CameraMatrix, 
		calib_params[0].DistCoeffs, calib_params[1].DistCoeffs, stereo_params->F) << endl;        

    // Transfer matrix from OpenCV Mat to Pangolin matrix
	CvtCameraExtrins(RVecs, TVecs);
	
    Timer PangolinTimer;

	// Stereo rectification
	stereoRectify(calib_params[0].CameraMatrix,
		calib_params[0].DistCoeffs,
		calib_params[1].CameraMatrix,
		calib_params[1].DistCoeffs,
		ImageSize, 
		stereo_params->R, 
		stereo_params->T, 
		rect_params->LeftRot,
		rect_params->RightRot,
		rect_params->LeftProj,
		rect_params->RightProj,
		rect_params->Disp2DepthReProjMat,
		CALIB_ZERO_DISPARITY, // test later
		1, // test later
		ImageSize,
		&rect_params->LeftRoi, 
		&rect_params->RightRoi);

    cout << "\nStereo rectification using calibration spent: " << PangolinTimer.getElapsedTimeInMilliSec() << "ms." << endl;

	rect_params->isVerticalStereo = fabs(rect_params->RightProj.at<double>(1, 3)) > 
										fabs(rect_params->RightProj.at<double>(0, 3));

	// Get the rectification re-map index
	initUndistortRectifyMap(calib_params[0].CameraMatrix, calib_params[0].DistCoeffs, 
		rect_params->LeftRot, rect_params->LeftProj,	
		ImageSize, CV_16SC2, rect_params->LeftRMAP[0], rect_params->LeftRMAP[1]);
	initUndistortRectifyMap(calib_params[1].CameraMatrix, calib_params[1].DistCoeffs, 
		rect_params->RightRot, rect_params->RightProj, 
		ImageSize, CV_16SC2, rect_params->RightRMAP[0], rect_params->RightRMAP[1]);

}
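
For context, a short sketch of how the re-map indices computed at the end of this function are typically consumed; the helper below is hypothetical and simply wraps cv::remap() over the LeftRMAP/RightRMAP pairs.

#include <opencv2/opencv.hpp>

// Hypothetical helper: rectify one stereo pair with the maps produced by
// initUndistortRectifyMap() above.
static void rectifyPair(const cv::Mat& rawLeft, const cv::Mat& rawRight,
                        const cv::Mat leftRMAP[2], const cv::Mat rightRMAP[2],
                        cv::Mat& rectLeft, cv::Mat& rectRight)
{
    cv::remap(rawLeft,  rectLeft,  leftRMAP[0],  leftRMAP[1],  cv::INTER_LINEAR);
    cv::remap(rawRight, rectRight, rightRMAP[0], rightRMAP[1], cv::INTER_LINEAR);
    // After remapping, corresponding points share a scanline (or a column for
    // a vertical rig), which is what block-matching stereo algorithms expect.
}
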
void CameraCalibration::MonoCalibration()
{

    vector<vector<Point2f> > ImagePoints;

    for(int i=0; i < NumFrames; i++)
    {

        Mat img = imread(calib_params[0].ImageList.at(i), CV_LOAD_IMAGE_COLOR);
        
        vector<Point2f> pointBuf;
        bool found = false;
        
        found = findChessboardCorners(img, BoardSize, pointBuf,
                                      CV_CALIB_CB_ADAPTIVE_THRESH |
                                      CV_CALIB_CB_NORMALIZE_IMAGE);

        
        if(found)
        {
            Mat viewGray;
            cvtColor(img, viewGray, CV_BGR2GRAY);
            cornerSubPix(viewGray, pointBuf, Size(11, 11), Size(-1, -1),
                         TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 100, 0.01));

//            drawChessboardCorners(img_left, calib_params.BoardSize, Mat(left_pointBuf), found_left);
//            imshow("Left Image View", img_left);
//            drawChessboardCorners(img_right, calib_params.BoardSize, Mat(right_pointBuf), found_right);
//            imshow("Right Image View", img_right);
//            waitKey();
        }
        else
        {
            cerr << i << "th image cannot be found a pattern." << endl;
            exit(EXIT_FAILURE);
        }

        ImagePoints.push_back(pointBuf);
        
    }

    vector<vector<Point3f> > ObjectPoints(1);
    for(int i=0; i<BoardSize.height; i++)
        for(int j=0; j<BoardSize.width; j++)
            ObjectPoints.at(0).push_back(Point3f(float( j*SquareSize ),
                                           float( i*SquareSize ),
                                           0));

    ObjectPoints.resize(ImagePoints.size(), ObjectPoints[0]);

    vector<Mat> RVecs, TVecs;

    calib_params->DistCoeffs = Mat::zeros(8, 1, CV_64F);
    
    calib_params->CameraMatrix = initCameraMatrix2D(ObjectPoints, ImagePoints, ImageSize, 0);
    double rms = calibrateCamera(ObjectPoints,
                                 ImagePoints,
                                 ImageSize,
                                 calib_params->CameraMatrix,
                                 calib_params->DistCoeffs,
                                 RVecs,
                                 TVecs,
                                 CV_CALIB_USE_INTRINSIC_GUESS |
                                 CV_CALIB_FIX_K3 |
                                 CV_CALIB_FIX_K4 |
                                 CV_CALIB_FIX_K5 |
                                 CV_CALIB_FIX_K6);

   cout << "Camera re-projection error reported by calibrateCamera: "<< rms << endl;

   // Transfer matrix from OpenCV Mat to Pangolin matrix
   CvtCameraExtrins(&RVecs, &TVecs);

}
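
For reference, the RMS figure printed above can be re-derived from calibrateCamera()'s outputs. The sketch below is self-contained; the function name is ours, and the parameters mirror the roles of ObjectPoints, ImagePoints, RVecs and TVecs above.

#include <opencv2/opencv.hpp>
#include <cmath>
#include <vector>

static double meanReprojectionError(const std::vector<std::vector<cv::Point3f> >& objectPoints,
                                    const std::vector<std::vector<cv::Point2f> >& imagePoints,
                                    const std::vector<cv::Mat>& rvecs,
                                    const std::vector<cv::Mat>& tvecs,
                                    const cv::Mat& cameraMatrix,
                                    const cv::Mat& distCoeffs)
{
    double totalErr = 0;
    size_t totalPts = 0;
    for (size_t i = 0; i < objectPoints.size(); i++)
    {
        std::vector<cv::Point2f> projected;
        cv::projectPoints(objectPoints[i], rvecs[i], tvecs[i], cameraMatrix, distCoeffs, projected);
        double err = cv::norm(imagePoints[i], projected, cv::NORM_L2);  // L2 norm of the per-view residuals
        totalErr += err * err;
        totalPts += objectPoints[i].size();
    }
    return std::sqrt(totalErr / totalPts);  // RMS reprojection error in pixels
}
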
Esempio n. 25
0
int main(int argc, char *argv[])
{
    // parse configuration file
    // get input arguments

    OpenCvStereoConfig stereo_config;
    string config_file = "";

    ConciseArgs parser(argc, argv);
    parser.add(config_file, "c", "config", "Configuration file containing camera GUIDs, etc.", true);
    parser.add(left_camera_mode, "l", "left-camera", "Calibrate just the left camera.");
    parser.add(right_camera_mode, "r", "right-camera", "Calibrate just the right camera.");
    parser.add(force_brightness, "b", "brightness", "set brightness to this level");
    parser.add(force_exposure, "e", "exposure", "set exposure to this level");
    parser.parse();

    // parse the config file
    if (ParseConfigFile(config_file, &stereo_config) != true)
    {
        fprintf(stderr, "Failed to parse configuration file, quitting.\n");
        return -1;
    }

    if (left_camera_mode || right_camera_mode)
    {
        stereo_mode = false;
    }

    uint64 guid = stereo_config.guidLeft;
    uint64 guid2 = stereo_config.guidRight;


    dc1394_t        *d;
    dc1394camera_t  *camera;
    dc1394error_t   err;

    Mat frame_array_left[MAX_FRAMES];
    Mat frame_array_right[MAX_FRAMES];

    int numFrames = 0;

    // ----- cam 2 -----
    dc1394_t        *d2;
    dc1394camera_t  *camera2;
    dc1394error_t   err2;

    d = dc1394_new ();
    if (!d)
        g_critical("Could not create dc1394 context");

    d2 = dc1394_new ();
    if (!d2)
        g_critical("Could not create dc1394 context for camera 2");

    camera = dc1394_camera_new (d, guid);
    if (!camera)
        g_critical("Could not create dc1394 camera");

    camera2 = dc1394_camera_new (d2, guid2);
    if (!camera2)
        g_critical("Could not create dc1394 camera for camera 2");

    // setup
    err = setup_gray_capture(camera, DC1394_VIDEO_MODE_FORMAT7_1);
    DC1394_ERR_CLN_RTN(err, cleanup_and_exit(camera), "Could not setup camera");

    err2 = setup_gray_capture(camera2, DC1394_VIDEO_MODE_FORMAT7_1);
    DC1394_ERR_CLN_RTN(err2, cleanup_and_exit(camera2), "Could not setup camera number 2");

    // enable auto-exposure
    // turn on the auto exposure feature
    err = dc1394_feature_set_power(camera, DC1394_FEATURE_EXPOSURE, DC1394_ON);
    DC1394_ERR_RTN(err,"Could not turn on the exposure feature");

    err = dc1394_feature_set_mode(camera, DC1394_FEATURE_EXPOSURE, DC1394_FEATURE_MODE_ONE_PUSH_AUTO);
    DC1394_ERR_RTN(err,"Could not turn on Auto-exposure");

    // enable auto-exposure
    // turn on the auto exposure feature
    err = dc1394_feature_set_power(camera2, DC1394_FEATURE_EXPOSURE, DC1394_ON);
    DC1394_ERR_RTN(err,"Could not turn on the exposure feature for cam2");

    err = dc1394_feature_set_mode(camera2, DC1394_FEATURE_EXPOSURE, DC1394_FEATURE_MODE_ONE_PUSH_AUTO);
    DC1394_ERR_RTN(err,"Could not turn on Auto-exposure for cam2");

    // enable camera
    err = dc1394_video_set_transmission(camera, DC1394_ON);
    DC1394_ERR_CLN_RTN(err, cleanup_and_exit(camera), "Could not start camera iso transmission");
    err2 = dc1394_video_set_transmission(camera2, DC1394_ON);
    DC1394_ERR_CLN_RTN(err2, cleanup_and_exit(camera2), "Could not start camera iso transmission for camera number 2");

    if (left_camera_mode || stereo_mode)
    {
        InitBrightnessSettings(camera, camera2);
        MatchBrightnessSettings(camera, camera2, true, force_brightness, force_exposure);
    } else {
        // use the right camera as the master for brightness
        // since we're calibrating that one
        InitBrightnessSettings(camera2, camera);
        MatchBrightnessSettings(camera2, camera, true);
    }

    // make opencv windows
    if (left_camera_mode || stereo_mode)
    {
    	namedWindow("Input Left", CV_WINDOW_AUTOSIZE);
    	moveWindow("Input Left", 100, 100);
    }

    if (right_camera_mode || stereo_mode)
    {
    	namedWindow("Input Right", CV_WINDOW_AUTOSIZE);
    	moveWindow("Input Right", 478, 100);
    }


    CvSize size;

    Mat cornersL, cornersR;

    int i;

    while (numFrames < MAX_FRAMES) {

        Mat chessL, chessR;

        // each loop dump a bunch of frames to clear the buffer
        MatchBrightnessSettings(camera, camera2, true, force_brightness, force_exposure);
        for (i=0;i<10;i++)
        {
            if (left_camera_mode || stereo_mode)
            {
                chessL = GetFrameFormat7(camera);
            }

            if (right_camera_mode || stereo_mode)
            {
                chessR = GetFrameFormat7(camera2);
            }
        }

        // copy the images for drawing/display
        size = chessL.size();
        Mat chessLc;
        chessLc.create(size, CV_32FC3);
        Mat chessRc;
        chessRc.create(size, CV_32FC3);

        // attempt checkerboard matching
        bool foundPattern = true; // start at true so the AND below is a
                                  // no-op when only one camera is in use

        if (left_camera_mode || stereo_mode)
        {
            foundPattern = findChessboardCorners(chessL, Size(CHESS_X, CHESS_Y), cornersL);
        }

        if (right_camera_mode || stereo_mode)
        {
            foundPattern = foundPattern & findChessboardCorners(chessR, Size(CHESS_X, CHESS_Y), cornersR);
        }

        if (left_camera_mode || stereo_mode)
        {
            cvtColor( chessL, chessLc, CV_GRAY2BGR );
            drawChessboardCorners(chessLc, Size(CHESS_X, CHESS_Y), cornersL, foundPattern);
            imshow("Input Left", chessLc);
        }

        if (right_camera_mode || stereo_mode)
        {
            cvtColor(chessR, chessRc, CV_GRAY2BGR);
            drawChessboardCorners(chessRc, Size(CHESS_X, CHESS_Y), cornersR, foundPattern);

            imshow("Input Right", chessRc);
        }


		// key codes:
		// page up: 65365
		// page down: 65366
		// b: 98
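		// (86 below is the low byte of the page down code 65366, and also ASCII 'V',
		// because waitKey()'s int return value is truncated when stored in a char;
		// 98 is ASCII 'b'. The page key codes depend on the HighGUI backend.)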
		char key = waitKey();
		//printf("%d\n", (int)key);
		if (key == 98)
		{
		    break;
		} else if (key == 86){
		    if (foundPattern)
		    {
		        // this was a good one -- save it
		        frame_array_left[numFrames] = chessL;
                frame_array_right[numFrames] = chessR;

                // give the user some guidance on the number
                // of frames they should be using
                if (stereo_mode)
                {
                    printf("Saved frame %d / about 10\n", numFrames);
                } else {
                    printf("Saved frame %d / about 20-30\n", numFrames);
                }

                numFrames ++;
            } else {
                printf("Not saving frame since did not find a checkboard.\n");
            }
		} else if (key == 'W') {
            force_brightness +=20;
            cout << "Brightness: " << force_brightness << "\n";
        } else if (key == 'w') {
            force_brightness -=20;
            cout << "Brightness: " << force_brightness << "\n";
        } else if (key == 'E') {
            force_exposure +=20;
            cout << "Exposure: " << force_exposure << "\n";
        } else if (key == 'e') {
            force_exposure -=20;
            cout << "Exposure: " << force_exposure << "\n";
        }
	}

    printf("\n\n");

    // clear out the calibration directory
    printf("Deleting old images...\nrm calibrationImages/*.ppm\n");
    int retval = system("rm calibrationImages/*.ppm");
    if (retval != 0) {
        printf("Warning: Deleting images may have failed.\n");
    }
    printf("done.\n");

    char filename[1000];

    for (i=0;i<numFrames;i++)
    {
        if (left_camera_mode || stereo_mode)
        {
            sprintf(filename, "calibrationImages/cam1-%05d.ppm", i+1);
            imwrite(filename, frame_array_left[i]);
        }

        if (right_camera_mode || stereo_mode)
        {
            sprintf(filename, "calibrationImages/cam2-%05d.ppm", i+1);
            imwrite(filename, frame_array_right[i]);
        }

        printf("Writing frame %d\n", i);
    }

    printf("\n\n");

    destroyWindow("Input Left");
    destroyWindow("Input Right");

    // stop data transmission
    err = dc1394_video_set_transmission(camera, DC1394_OFF);
    DC1394_ERR_CLN_RTN(err,cleanup_and_exit(camera),"Could not stop the camera");

    err2 = dc1394_video_set_transmission(camera2, DC1394_OFF);
    DC1394_ERR_CLN_RTN(err2,cleanup_and_exit(camera2),"Could not stop the camera 2");

    // close camera
    cleanup_and_exit(camera);
    cleanup_and_exit(camera2);
    dc1394_free (d);
    dc1394_free (d2);

    return 0;
}
Esempio n. 26
0
void CameraCalibration::calibrate()
{
    const string inputSettingsFile = "default.xml";
    // Read the settings
    FileStorage fs( inputSettingsFile, FileStorage::READ );
    if ( !fs.isOpened() ) {
        FileStorage fs( inputSettingsFile, FileStorage::WRITE );
        fs.release();
        cerr << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
        return;
    }
    else {
        s.read( fs["Settings"] );
        // close Settings file
        fs.release();
    }

    if ( !s.goodInput ) {
        cerr << "Invalid input detected. Application stopping." << endl;
        return;
    }


    vector<vector<Point2f> > imagePoints;
    Mat distCoeffs;
    Size imageSize;
    int mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION;
    clock_t prevTimestamp = 0;
    const Scalar RED( 0, 0, 255 ), GREEN( 0, 255, 0 );
    const char ESC_KEY = 27;

    for ( int i = 0; ; ++i ) {
        Mat view;
        bool blinkOutput = false;

        view = s.nextImage();

        //-----  If no more image, or got enough, then stop calibration and show result -------------
        if ( mode == CAPTURING && imagePoints.size() >= (unsigned)s.nrFrames ) {
            if ( runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints ) ) {
                mode = CALIBRATED;
            }
            else {
                mode = DETECTION;
            }
        }
        // If no more images then run calibration, save and stop loop.
        if ( view.empty() ) {
            if ( imagePoints.size() > 0 ) {
                runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints);
            }
            break;
        }

        imageSize = view.size();  // Format input image.
        if ( s.flipVertical ) {
            flip( view, view, 0 );
        }

        vector<Point2f> pointBuf;

        bool found;
        // Find feature points on the input format
        switch ( s.calibrationPattern ) {
        case Settings::CHESSBOARD:
            found = findChessboardCorners( view, s.boardSize, pointBuf,
                CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK | CALIB_CB_NORMALIZE_IMAGE);
            break;
        case Settings::CIRCLES_GRID:
            found = findCirclesGrid( view, s.boardSize, pointBuf );
            break;
        case Settings::ASYMMETRIC_CIRCLES_GRID:
            found = findCirclesGrid( view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID );
            break;
        default:
            found = false;
            break;
        }

        // If done with success, improve the found corners' coordinate accuracy for chessboard
        if ( found ) {
                if ( s.calibrationPattern == Settings::CHESSBOARD ) {
                    Mat viewGray;
                    cvtColor( view, viewGray, COLOR_BGR2GRAY );
                    cornerSubPix( viewGray, pointBuf, Size( 11,11 ), Size(-1,-1), TermCriteria( TermCriteria::EPS + TermCriteria::MAX_ITER, 30, 0.1 ) );
                }

                // For camera only take new samples after delay time
                if ( mode == CAPTURING && (!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC) ) {
                    imagePoints.push_back( pointBuf );
                    prevTimestamp = clock();
                    blinkOutput = s.inputCapture.isOpened();
                }

                // Draw the corners.
                drawChessboardCorners( view, s.boardSize, Mat( pointBuf ), found );
        }

        //----------------------------- Output Text ------------------------------------------------
        string msg = ( mode == CAPTURING ) ? "100/100" :
                      mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
        int baseLine = 0;
        Size textSize = getTextSize( msg, 1, 1, 1, &baseLine );
        Point textOrigin( view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10 );

        if ( mode == CAPTURING ) {
            if ( s.showUndistorsed ) {
                msg = format( "%d/%d Undist", (int)imagePoints.size(), s.nrFrames );
            }
            else {
                msg = format( "%d/%d", (int)imagePoints.size(), s.nrFrames );
            }
        }

        putText( view, msg, textOrigin, 1, 1, mode == CALIBRATED ?  GREEN : RED );

        if ( blinkOutput ) {
            bitwise_not( view, view );
        }

        //------------------------- Video capture  output  undistorted ------------------------------
        if ( mode == CALIBRATED && s.showUndistorsed ) {
            Mat temp = view.clone();
            undistort( temp, view, cameraMatrix, distCoeffs );
        }

        //------------------------------ Show image and check for input commands -------------------
        imshow( "Image View", view );
        char key = (char)waitKey( s.inputCapture.isOpened() ? 50 : s.delay );

        if ( key  == ESC_KEY ) {
            break;
        }

        if ( key == 'u' && mode == CALIBRATED ) {
            s.showUndistorsed = !s.showUndistorsed;
        }

        if ( s.inputCapture.isOpened() && key == 'g' ) {
            mode = CAPTURING;
            imagePoints.clear();
        }
    }

    // -----------------------Show the undistorted image for the image list ------------------------
    if ( s.inputType == Settings::IMAGE_LIST && s.showUndistorsed ) {
        Mat view, rview, map1, map2;
        initUndistortRectifyMap( cameraMatrix, distCoeffs, Mat(),
            getOptimalNewCameraMatrix( cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0 ),
            imageSize, CV_16SC2, map1, map2 );

        for ( int i = 0; i < (int)s.imageList.size(); i++ ) {
            view = imread( s.imageList[i], 1 );
            if ( view.empty() ) {
                continue;
            }
            remap( view, rview, map1, map2, INTER_LINEAR );
            imshow( "Image View", rview );
            char c = (char)waitKey();
            if ( c  == ESC_KEY || c == 'q' || c == 'Q' ) {
                break;
            }
        }
    }
}
Esempio n. 27
0
std::vector<Point> KinectCalibrator::findCalibrationSquare(Mat rgbMatrix){
    Size chessboardSize(5,4);
    // Note: the result is not checked; _pointVector is returned even when no board was found.
    bool found = findChessboardCorners(rgbMatrix, chessboardSize, _pointVector);

    return _pointVector;
}
Esempio n. 28
0
int main_file(Parameter *pParam)
{
	char *file_list = pParam->image_list;//"/home/sean/Pictures/calib_test1/dir.txt";

	Size boardSize(8,6);
	Size imageSize;
	int   flags = CV_CALIB_FIX_ASPECT_RATIO;
	float squareSize = pParam->square_size;
	float aspectRatio = 1.f;
	Mat cameraMatrix;
	Mat distCoeffs;
	int result = 0;

	// read frames from data file
	vector<vector<Point2f> > imagePointsSet;
	vector<Point2f> imagePoints;
	vector<string>  fileNames;

	fileNames.clear();
	imagePointsSet.clear();

	getFileName(file_list, fileNames);

	for(unsigned int i = 0; i < fileNames.size(); i++)
	{
		Mat framecv;
		int found = 0;

		framecv = imread(fileNames[i].c_str(), 0);

		if(framecv.cols <= 0 || framecv.rows <= 0 || framecv.data == NULL )
		{
			printf("finish chess board detection \n");
			break;
		}

		imagePoints.clear();
		imageSize.width = framecv.cols;
		imageSize.height = framecv.rows;

		found = findChessboardCorners(
						framecv,
						boardSize,
						imagePoints,
						CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);

		if(found)
		{
			cornerSubPix(
					framecv,
					imagePoints,
					Size(11,11),
					Size(-1,-1),
					TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

			if(found)
			{
				drawChessboardCorners( framecv, boardSize, Mat(imagePoints), found);
				imshow("framecv_xx", framecv);
				waitKey(10);
			}

			imagePointsSet.push_back(imagePoints);

		}

	}


	// calibrate cameras
	if(1)
    {
	    vector<Mat> rvecs, tvecs;
	    vector<float> reprojErrs;
	    double totalAvgErr = 0;

	    result = runCalibration(imagePointsSet, imageSize, boardSize, CHESSBOARD, squareSize,
                   aspectRatio, flags, cameraMatrix, distCoeffs,
                   rvecs, tvecs, reprojErrs, totalAvgErr);
    }

	// test calibrate
	if(1)
	{
        Mat view, rview, map1, map2;
        int i;
        Size imageSize2;

        imageSize2.width  = 2 * imageSize.width;
        imageSize2.height = 2 * imageSize.height;

        initUndistortRectifyMap(cameraMatrix,
        						distCoeffs,
								Mat(),
                                getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
                                imageSize, CV_16SC2, map1, map2);


        for(i = 0; i < fileNames.size(); i++)
        {
        	view = imread(fileNames[i].c_str());
        	remap(view, rview, map1, map2, INTER_LINEAR);

        	imshow("rview", rview);
        	waitKey(0);
        }
	}

	// save
	if(result == 0 )
	{
		save_result(pParam->output_path, cameraMatrix, distCoeffs);
	}

	return result;
}
Esempio n. 29
0
int main_camera(Parameter *pParam)
{

	Size boardSize(8,6);
	Size imageSize;
	int   flags = CV_CALIB_FIX_ASPECT_RATIO;
	float squareSize = pParam->square_size;
	float aspectRatio = 1.f;
	Mat cameraMatrix;
	Mat distCoeffs;
	Mat frame;
	VideoCapture video;

	int flag_finish = 0;

	int result = 0;

	// read frames from data file
	vector<vector<Point2f> > imagePointsSet;
	vector<Point2f> imagePoints;
	vector<string>  fileNames;

	fileNames.clear();
	imagePointsSet.clear();

	video.open(pParam->camera_index);

	if(video.isOpened() != true)
	{
		printf("fail to open camera %d\n", pParam->camera_index);

		video.open(-1);

		if(video.isOpened() != true)
		{
			printf("fail to open the default camera, please make sure an accessible camera is connected \n");
			return -1;
		}
		else
		{
			printf("open the default camera\n");
		}
	}

	while(flag_finish == 0)
	{
		Mat framecv;
		int found = 0;

		video >> frame;

		cvtColor(frame, framecv, CV_BGR2GRAY); // VideoCapture delivers BGR frames

		imshow("framecv", framecv); // for oberserving input
		waitKey(10);

		if(framecv.cols <= 0 || framecv.rows <= 0 || framecv.data == NULL )
		{
			printf("finish chess board detection \n");
			break;
		}

		imagePoints.clear();
		imageSize.width = framecv.cols;
		imageSize.height = framecv.rows;

		found = findChessboardCorners(
						framecv,
						boardSize,
						imagePoints,
						CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);

		if(found)
		{
			cornerSubPix(
					framecv,
					imagePoints,
					Size(11,11),
					Size(-1,-1),
					TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));

			if(found)
			{
				char key = 0;
				drawChessboardCorners( framecv, boardSize, Mat(imagePoints), found);
				imshow("framecv_xx", framecv);
				key = waitKey(0);
				if(key == 'c' || key == 'C') // the user rejected this detection
					continue;
				else if(key == 'q' || key == 'Q')
					return 0;
				else if(key == 's' || key == 'S')
					flag_finish = 1;
			}

			printf("get a new chess board input\n");
			imagePointsSet.push_back(imagePoints);

		}
		else
		{
			printf("no found usable chess board\n");
		}

	}


	// calibrate cameras
	if(1)
    {
	    vector<Mat> rvecs, tvecs;
	    vector<float> reprojErrs;
	    double totalAvgErr = 0;

	    result = runCalibration(imagePointsSet, imageSize, boardSize, CHESSBOARD, squareSize,
                   aspectRatio, flags, cameraMatrix, distCoeffs,
                   rvecs, tvecs, reprojErrs, totalAvgErr);
    }

	// test calibrate
	if(1)
	{
        Mat view, rview, map1, map2;
        int i;
        Size imageSize2;

        imageSize2.width  = 2 * imageSize.width;
        imageSize2.height = 2 * imageSize.height;

        initUndistortRectifyMap(cameraMatrix,
        						distCoeffs,
								Mat(),
                                getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
                                imageSize, CV_16SC2, map1, map2);


        while(1)
        {
        	char key = 0;
        	video>>view;
        	remap(view, rview, map1, map2, INTER_LINEAR);

        	imshow("rview", rview);
        	key = waitKey(0);
        	if(key == 's')
        		break;
        	else if(key == 'q')
        		break;
        }
	}

	return result;
}
Esempio n. 30
0
/// Calculates the corner pixel locations as detected by each camera
/// In: s: board settings, includes size, square size and the number of corners
///     inputCapture1: video capture for camera 1
///     inputCapture2: video capture for camera 2
///     iterations: number of chessboard images to take
/// Out: imagePoints1: pixel coordinates of chessboard corners for camera 1
///      imagePoints2: pixel coordinates of chessboard corners for camera 2
bool RetrieveChessboardCorners(vector<vector<Point2f> >& imagePoints1, vector<vector<Point2f> >& imagePoints2, BoardSettings s, VideoCapture inputCapture1,VideoCapture inputCapture2, int iterations)
{
    destroyAllWindows();
    Mat image1,image2;
    vector<Point2f> pointBuffer1;
    vector<Point2f> pointBuffer2;
    clock_t prevTimeStamp = 0;
    bool found1,found2;
    int count = 0;
    while (count != iterations){
        char c = waitKey(15);
        if (c == 's'){
            cerr << "Calibration stopped" << endl;
            return false;
        }
        // try find chessboard corners
        else if(c == 'c'){
            // ADAPTIVE_THRESH -> use adaptive thresholding to convert image to B&W
            // FAST_CHECK -> Terminates call earlier if no chessboard in image
            // NORMALIZE_IMAGE -> normalize image gamma before thresholding
            // FILTER_QUADS -> uses additional criteria to filter out false quads
            found1 = findChessboardCorners(image1, s.boardSize, pointBuffer1,
                                           CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK |
                                           CV_CALIB_CB_NORMALIZE_IMAGE | CV_CALIB_CB_FILTER_QUADS);
            found2 = findChessboardCorners(image2, s.boardSize, pointBuffer2,
                                           CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK |
                                           CV_CALIB_CB_NORMALIZE_IMAGE | CV_CALIB_CB_FILTER_QUADS);
            
            if (found1 && found2 && (pointBuffer1.size() >= s.cornerNum) && (pointBuffer2.size() >= s.cornerNum)){
                // if time delay passed refine accuracy and store
                if ((clock() - prevTimeStamp) > CAPTURE_DELAY * 1e-3*CLOCKS_PER_SEC){
                    Mat imageGray1, imageGray2;
                    cvtColor(image1, imageGray1, COLOR_BGR2GRAY);
                    cvtColor(image2, imageGray2, COLOR_BGR2GRAY);
                    
                    // refines corner locations
                    // Size(5,5) -> size of the search window
                    // Size(-1,-1) -> indicates no dead zone in the search size
                    // TermCriteria -> max 1000 iterations, to reach an accuracy of 0.01
                    cornerSubPix(imageGray1, pointBuffer1, Size(5,5), Size(-1, -1),
                                 TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01));
                    cornerSubPix(imageGray2, pointBuffer2, Size(5,5), Size(-1, -1),
                                 TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01));
                    
                    drawChessboardCorners(image1, s.boardSize, Mat(pointBuffer1), found1);
                    imshow("Image View1", image1);
                    drawChessboardCorners(image2, s.boardSize, Mat(pointBuffer2), found2);
                    imshow("Image View2", image2);
                    
                    // user verifies the correct corners have been found
                    c = waitKey(0);
                    if (c == 's'){
                        return false;
                    }
                    if (c == 'y'){
                        // store the points and store time stamp
                        imagePoints1.push_back(pointBuffer1);
                        imagePoints2.push_back(pointBuffer2);
                        prevTimeStamp = clock();
                        count++;
                        cerr << "Count: " << count << endl;
                    }
                }
            }
        }
        inputCapture1.read(image1);
        inputCapture2.read(image2);
        imshow("Image View1", image1);
        imshow("Image View2", image2);
    }
    // found all corners
    return true;
}
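
A sketch of the step that typically follows: building the board's physical corner coordinates and passing both corner sets to stereoCalibrate(). The helper name, flag choices, and termination criteria are our assumptions, written in the OpenCV 2.x argument order used by the other examples on this page.

#include <opencv2/opencv.hpp>
#include <vector>

using namespace cv;
using namespace std;

// Hypothetical follow-up to RetrieveChessboardCorners(): boardSize, squareSize
// and imageSize would come from BoardSettings and the captured frames.
static double CalibrateFromCorners(const vector<vector<Point2f> >& imagePoints1,
                                   const vector<vector<Point2f> >& imagePoints2,
                                   Size boardSize, float squareSize, Size imageSize)
{
    // One grid of 3D board points, replicated once per captured view.
    vector<vector<Point3f> > objectPoints(1);
    for (int i = 0; i < boardSize.height; i++)
        for (int j = 0; j < boardSize.width; j++)
            objectPoints[0].push_back(Point3f(j * squareSize, i * squareSize, 0));
    objectPoints.resize(imagePoints1.size(), objectPoints[0]);

    Mat M1 = Mat::eye(3, 3, CV_64F), D1;
    Mat M2 = Mat::eye(3, 3, CV_64F), D2;
    Mat R, T, E, F;
    return stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
                           M1, D1, M2, D2, imageSize, R, T, E, F,
                           TermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS, 100, 1e-5),
                           CV_CALIB_FIX_ASPECT_RATIO + CV_CALIB_ZERO_TANGENT_DIST);
}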