// Loads the stereo pair's intrinsic and extrinsic calibration and precomputes
// the rectification maps plus the block-matcher tuning parameters.
//
// @param intrinsicsFile path to the intrinsics YAML (keys M1, M2, D1, D2)
// @param extrinsicsFile path to the extrinsics YAML (keys R1, R2, P1, P2, Q)
// @param frameSize      resolution of the frames that will be rectified
FrameProcessor::FrameProcessor(std::string intrinsicsFile, std::string extrinsicsFile, Size frameSize)
{
    // FIX: the constructor previously ignored both file-name parameters and
    // always read the hard-coded "device/camera_properties/*.yml" paths.
    FileStorage intrinsics(intrinsicsFile, FileStorage::READ);
    FileStorage extrinsics(extrinsicsFile, FileStorage::READ);
    intrinsics["M1"] >> M1;
    intrinsics["M2"] >> M2;
    intrinsics["D1"] >> D1;
    intrinsics["D2"] >> D2;
    extrinsics["R1"] >> R1;
    extrinsics["R2"] >> R2;
    extrinsics["P1"] >> P1;
    extrinsics["P2"] >> P2;
    extrinsics["Q"] >> Q;
    intrinsics.release();
    extrinsics.release();
    // Precompute fixed-point (CV_16SC2) remap tables for both cameras.
    initUndistortRectifyMap(M1, D1, R1, P1, frameSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(M2, D2, R2, P2, frameSize, CV_16SC2, rmap[1][0], rmap[1][1]);
    // Block-matcher tuning; the commented-out values are alternatives kept
    // from earlier experiments.
    // BlockMatcher.state->preFilterType = CV_STEREO_BM_XSOBEL; //CV_STEREO_BM_NORMALIZED_RESPONSE;
    // BlockMatcher.state->preFilterSize = 9;
    BlockMatcher.state->preFilterCap = 45;
    BlockMatcher.state->SADWindowSize = 31;
    // BlockMatcher.state->minDisparity = 0;
    BlockMatcher.state->numberOfDisparities = 128;
    // BlockMatcher.state->textureThreshold = 10;
    // BlockMatcher.state->uniquenessRatio = 15;
    // BlockMatcher.state->speckleRange = 60;
    // BlockMatcher.state->speckleWindowSize = 20;
    // BlockMatcher.state->trySmallerWindows = 0;
    // BlockMatcher.state->roi1 = BlockMatcher.state->roi2 = cvRect(160,120,480,360);
    // BlockMatcher.state->disp12MaxDiff = -1;
}
// Recomputes the cached undistortion lookup tables for the depth and RGB
// sensors from their current intrinsics/distortion. No rectification
// rotation is applied (identity), and the maps use the fixed-point
// CV_16SC2 representation.
void RGBDCalibration :: updateDistortionMaps()
{
    initUndistortRectifyMap(depth_intrinsics, depth_distortion, Mat(), depth_intrinsics,
                            depth_size, CV_16SC2, depth_undistort_map1, depth_undistort_map2);
    initUndistortRectifyMap(rgb_intrinsics, rgb_distortion, Mat(), rgb_intrinsics,
                            rgb_size, CV_16SC2, rgb_undistort_map1, rgb_undistort_map2);
}
// Loads previously captured pattern images from disk and stores undistorted
// grayscale versions of them in m_vCaptures.
//
// @param ruta         path prefix; files are expected as <ruta>NN.jpg
//                     (two-digit zero-padded index)
// @param CameraMatrix camera intrinsic matrix used for undistortion
// @param DistMatrix   lens distortion coefficients
// @return true if every expected image was loaded, false on the first
//         missing/unreadable image
bool CCapturador::LoadCapturesFromFilesUndisorted(string ruta, Mat& CameraMatrix, Mat& DistMatrix)
{
    m_vCaptures.clear();
    m_nPatterns = m_Options->m_nNumPatterns;
    for (int i = 0; i < m_nPatterns + m_Options->m_nNumFringes * 2; i++)
    {
        // Build "<ruta>NN.jpg" with a two-digit, zero-padded index.
        // FIX: dropped the unused 'temp' copy of oss.str() and the no-op
        // oss.clear() (which clears error flags, not the buffered string —
        // harmless only because oss is re-created each iteration).
        std::ostringstream oss;
        oss << ruta;
        if (i < 10)
            oss << "0";
        oss << i << ".jpg";
        Mat capture = imread(oss.str(), 1);
        if (capture.empty())
            return false;
        Mat gray;
        cv::cvtColor(capture, gray, CV_BGR2GRAY);
        // Undistort with alpha = 1 so all source pixels are retained.
        Mat rview, map1, map2;
        initUndistortRectifyMap(CameraMatrix, DistMatrix, Mat(),
                                getOptimalNewCameraMatrix(CameraMatrix, DistMatrix, gray.size(), 1, gray.size(), 0),
                                gray.size(), CV_16SC2, map1, map2);
        remap(gray, rview, map1, map2, INTER_LINEAR);
        m_vCaptures.push_back(rview);
    }
    return true;
}
// Builds the color shader program, loads the camera intrinsics from
// <rootDirectory>/intrinsics.xml, derives the OpenGL projection matrix and
// pixel-space K/Kinv from them, and configures global GL state.
//
// @param rootDirectory base directory containing shader/ and intrinsics.xml
Renderer::Renderer(const char *rootDirectory)
{
    char file[200];
    // create color program
    string src;
    // FIX: use snprintf instead of sprintf — rootDirectory is caller
    // controlled, so guard against overflowing the fixed-size path buffer.
    snprintf(file, sizeof(file), "%s/shader/color.vertexshader", rootDirectory);
    loadShaderCodeFromFile(file, src);
    compileShader(src, GL_VERTEX_SHADER, shader["color_vertex"]);
    snprintf(file, sizeof(file), "%s/shader/color.fragmentshader", rootDirectory);
    loadShaderCodeFromFile(file, src);
    compileShader(src, GL_FRAGMENT_SHADER, shader["color_fragment"]);
    if (createRenderProgram(shader["color_vertex"], shader["color_fragment"], program["color"]) == GL_FALSE)
        return;
    MatrixID = glGetUniformLocation(program["color"], "MVP");
    ViewMatrixID = glGetUniformLocation(program["color"], "ViewMatrix");
    ModelMatrixID = glGetUniformLocation(program["color"], "ModelMatrix");
    LightPositionID = glGetUniformLocation(program["color"], "LightPosition_worldspace");
    Mat cameraMatrix, distCoeffs;
    snprintf(file, sizeof(file), "%s/intrinsics.xml", rootDirectory);
    cv::FileStorage fs(file, cv::FileStorage::READ);
    fs["camera_matrix"] >> cameraMatrix;
    fs["distortion_coefficients"] >> distCoeffs;
    fs.release();
    // calculate undistortion mapping
    // NOTE(review): map1/map2 are locals and are discarded when the
    // constructor returns — confirm whether they were meant to be members.
    Mat img_rectified, map1, map2;
    initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
                            getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, cv::Size(WIDTH, HEIGHT), 1, cv::Size(WIDTH, HEIGHT), 0),
                            cv::Size(WIDTH, HEIGHT), CV_16SC2, map1, map2);
    ViewMatrix = Matrix4f::Identity();
    ViewMatrix.topRightCorner(3,1) << 0, 0, -2;
    float n = 0.01; // near field
    float f = 100;  // far field
    // Map the pinhole intrinsics onto an OpenGL-style projection matrix.
    ProjectionMatrix << cameraMatrix.at<double>(0, 0) / cameraMatrix.at<double>(0, 2), 0.0, 0.0, 0.0,
                        0.0, cameraMatrix.at<double>(1, 1) / cameraMatrix.at<double>(1, 2), 0.0, 0.0,
                        0.0, 0.0, -(f + n) / (f - n), (-2.0f * f * n) / (f - n),
                        0.0, 0.0, -1.0, 0.0;
    K << cameraMatrix.at<double>(0, 0), cameraMatrix.at<double>(0, 1), cameraMatrix.at<double>(0, 2),
         cameraMatrix.at<double>(1, 0), cameraMatrix.at<double>(1, 1), cameraMatrix.at<double>(1, 2),
         cameraMatrix.at<double>(2, 0), cameraMatrix.at<double>(2, 1), cameraMatrix.at<double>(2, 2);
    cout << "K\n" << K << endl;
    Kinv = K.inverse();
    // background color
    glClearColor(0.0f, 0.0f, 0.0f, 0.0f);
    // Enable depth test
    glEnable(GL_DEPTH_TEST);
    // Accept fragment if it closer to the camera than the former one
    glDepthFunc(GL_LESS);
    // Cull triangles which normal is not towards the camera
    glEnable(GL_CULL_FACE);
}
// Sets up the vision node: loads camera intrinsics from disk, precomputes the
// undistortion maps, wires up the ROS publishers/subscribers, waits for the
// mocap plugin to subscribe, announces this camera's ID and starts capture.
VisionNode::VisionNode()
{
    cv::FileStorage intrinsics_fs("/home/roboy/workspace/mocap/src/intrinsics.xml", cv::FileStorage::READ);
    if (!intrinsics_fs.isOpened()) {
        ROS_ERROR("could not open intrinsics.xml");
        return;
    }
    intrinsics_fs["camera_matrix"] >> cameraMatrix;
    intrinsics_fs["distortion_coefficients"] >> distCoeffs;
    intrinsics_fs.release();

    ID = 0;

    // calculate undistortion mapping (alpha = 1 keeps all source pixels)
    initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
                            getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, cv::Size(WIDTH, HEIGHT), 1, cv::Size(WIDTH, HEIGHT), 0),
                            cv::Size(WIDTH, HEIGHT), CV_16SC2, map1, map2);

    marker_position_pub = new ros::Publisher;
    *marker_position_pub = nh.advertise<communication::MarkerPosition>("/mocap/marker_position", 100);
    video_pub = new ros::Publisher;
    *video_pub = nh.advertise<sensor_msgs::Image>("/mocap/video", 1);
    camera_control_sub = nh.subscribe("/mocap/camera_control", 100, &VisionNode::camera_control, this);
    cameraID_pub = new ros::Publisher;
    *cameraID_pub = nh.advertise<std_msgs::Int32>("/mocap/cameraID", 100);

    // Block until the mocap plugin is listening for our camera ID.
    while (cameraID_pub->getNumSubscribers() < 1) {
        ros::Duration retry_delay(1.0);
        if (!ros::ok()) {
            return;
        }
        ROS_WARN_ONCE("Waiting for mocap plugin to subscribe to /mocap/cameraID");
        retry_delay.sleep();
    }
    ROS_INFO_ONCE("Found subscriber");

    spinner = new ros::AsyncSpinner(1);
    spinner->start();

    std_msgs::Int32 id_msg;
    id_msg.data = ID;
    cameraID_pub->publish(id_msg);

    // Wrap the preallocated pixel buffers in cv::Mat headers (external data,
    // no copy is made by this constructor).
    img = cv::Mat(HEIGHT, WIDTH, CV_8UC4, img_data);
    img_rectified = cv::Mat(HEIGHT, WIDTH, CV_8UC4, img_rectified_data);
    t1 = std::chrono::high_resolution_clock::now();
    StartCamera(WIDTH, HEIGHT, 90, CameraCallback);
}
// input: cam_ID[0] for local camera ID
// cam_ID[1] for reference camera ID
//
// Rectifies the camera pair CamPair: derives the relative pose between the
// two cameras from their absolute extrinsics, stereo-rectifies them, remaps
// each camera's image and mask into the rectified frame, and returns the
// disparity-to-depth matrix in Q.
void CStereoMatching::Rectify(int CamPair, cv::Mat &Q)
{
    printf("\trectifying...\n");
    // Full-resolution size of the top pyramid level.
    cv::Size largestSize = m_data->m_LowestLevelSize*(1<<(m_data->m_PyrmNum-1));
    vector<camera> & current_cam = m_data->cam[CamPair];
    cv::Mat R,T;
    cv::Mat R_new[2];
    cv::Rect validRoi[2];
    // Relative rotation/translation from camera 0 to camera 1, composed from
    // the two absolute extrinsics (columns 0-2 = rotation, column 3 = translation).
    R = current_cam[1].MatExtrinsics.colRange(0,3) * current_cam[0].MatExtrinsics.colRange(0,3).t();
    T = -R*current_cam[0].MatExtrinsics.col(3) + current_cam[1].MatExtrinsics.col(3);
    // Zero distortion coefficients: inputs are treated as already undistorted.
    cv::Mat distCoeffs = cv::Mat::zeros(4,1,CV_64FC1);
    cv::stereoRectify( current_cam[0].MatIntrinsics, distCoeffs, current_cam[1].MatIntrinsics, distCoeffs, m_data->m_OriginSize, R, T, R_new[0], R_new[1], current_cam[0].P, current_cam[1].P, Q, 0, -1, m_data->m_OriginSize, &validRoi[0], &validRoi[1]);
    // R_final / T_final are not declared locally — presumably class members
    // holding the rectified camera-0 pose; confirm against the class header.
    R_final = current_cam[0].MatExtrinsics.colRange(0,3).t() * R_new[0].t();
    T_final = -current_cam[0].MatExtrinsics.colRange(0,3).t() * current_cam[0].MatExtrinsics.col(3);
    cv::Mat Extrinsic_final = cv::Mat::zeros(4,4,CV_64FC1);
    Extrinsic_final.at<double>(3,3) = 1;
    // MatExpr assignment evaluates into the sub-matrix views in place.
    Extrinsic_final(cv::Range(0,3), cv::Range(0,3)) = R_final.t();
    Extrinsic_final(cv::Range(0,3), cv::Range(3,4)) = -R_final.t()*T_final;
    // NOTE(review): the sign of Q(3,2) is flipped — presumably to match this
    // pipeline's depth-sign convention; verify against downstream reprojection.
    Q.at<double>(3,2) = -Q.at<double>(3,2);
    cv::Mat rmap[2], img;
    // Scale bringing the origin-size projection to the largest pyramid level.
    double scale = double(m_data->m_LowestLevelSize.width) / m_data->m_OriginSize.width * (1<<(m_data->m_PyrmNum-1));
    for (int j=0; j<2; j++)
    {
        // Scale the first two rows of P (pixel rows), then build the remap
        // tables before P is composed with the final extrinsic.
        current_cam[j].P.rowRange(0,2) *= scale;
        initUndistortRectifyMap(current_cam[j].MatIntrinsics, distCoeffs, R_new[j], current_cam[j].P, largestSize, CV_16SC2, rmap[0], rmap[1]);
        current_cam[j].P = current_cam[j].P*Extrinsic_final;
        img = cv::imread(current_cam[j].image_name);
        if (img.empty() == true)
        {
            printf("read image %s error\n", current_cam[j].image_name.c_str());
            return ;
        }
        // cv::medianBlur(img,img,5);
        cv::remap(img, current_cam[j].image, rmap[0], rmap[1], CV_INTER_LINEAR);
        img = cv::imread(current_cam[j].mask_name, CV_LOAD_IMAGE_GRAYSCALE);
        cv::remap(img, current_cam[j].mask, rmap[0], rmap[1], CV_INTER_LINEAR);
        // Erode the remapped mask — likely to drop unreliable border pixels
        // introduced by the remap; kernel size scales with the pyramid depth.
        cv::Mat element = cv::getStructuringElement( cv::MORPH_ELLIPSE, cv::Size( 3*(1<<(m_data->m_PyrmNum-1)), 3*(1<<(m_data->m_PyrmNum-1) )));
        cv::erode(current_cam[j].mask, current_cam[j].mask, element);
        if (m_data->isoutput)
        {
            // Dump the rectified image for debugging when output is enabled.
            char filename[MAX_PATH];
            sprintf(filename, "%d_%d.jpg", CamPair, current_cam[j].camID);
            cv::imwrite(filename, current_cam[j].image);
            // sprintf(filename, "%d_mask.jpg", current_cam[j].camID);
            // cv::imwrite(filename, current_cam[j].mask);
        }
    }
}
/////////////////////////////////////////////////////// // Panel::LoadCalibration() // Description: Imports a previously created camera // calibration created by CalibrateCameraNoOutput. /////////////////////////////////////////////////////// void Panel::LoadCalibration(string sFilePath) { cout << "Loading Calibration" << endl; //! [file_read] Mat import_distortion_coefficients; Mat import_camera_matrix; Mat import_image_points; Size import_image_size; const string inputSettingsFile = sFilePath; FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings if (!fs.isOpened()) { cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl; // return -1; } fs["distortion_coefficients"] >> import_distortion_coefficients; fs["camera_matrix"] >> import_camera_matrix; fs["image_width"] >> import_image_size.width; fs["image_height"] >> import_image_size.height; fs["image_points"] >> import_image_points; fs.release(); // close Settings file //! [file_read] Mat view, rview, map1, map2; initUndistortRectifyMap(import_camera_matrix, import_distortion_coefficients, Mat(), getOptimalNewCameraMatrix(import_camera_matrix, import_distortion_coefficients, import_image_size, 1, import_image_size, 0), import_image_size, CV_16SC2, map1, map2); m_mainMap1 = map1; m_mainMap2 = map2; m_mainCameraMatrix = import_camera_matrix; m_mainDistCoeffs = import_distortion_coefficients; cout << "Calibration Loaded" << endl << endl; }
void ProjectorCalibration :: loadFromFile(const char* filename) { QFileInfo f (filename); ntk_throw_exception_if(!f.exists(), "Could not find calibration file."); cv::FileStorage calibration_file (filename, CV_STORAGE_READ); readMatrix(calibration_file, "proj_intrinsics", intrinsics); readMatrix(calibration_file, "proj_distortion", distortion); readMatrix(calibration_file, "R", R); readMatrix(calibration_file, "T", T); cv::Mat1i size_mat; readMatrix(calibration_file, "proj_size", size_mat); proj_size = cv::Size(size_mat(0,0), size_mat(0,1)); calibration_file.release(); pose = new Pose3D(); pose->toRightCamera(intrinsics, R, T); initUndistortRectifyMap(intrinsics, distortion, Mat(), intrinsics, proj_size, CV_16SC2, undistort_map1, undistort_map2); }
/// Calibrates the extrinsic parameters of the setup and saves it to an XML file
/// Press 'r' to retrieve chessboard corners
///       's' to save and exit
///       'c' to exit without saving
/// In: inputCapture1: video feed of camera 1
///     inputCapture2: video feed of camera 2
void CalibrateEnvironment(VideoCapture& inputCapture1, VideoCapture& inputCapture2)
{
    Size boardSize;
    boardSize.width = BOARD_WIDTH;
    boardSize.height = BOARD_HEIGHT;

    const string fileName1 = "CameraIntrinsics1.xml";
    const string fileName2 = "CameraIntrinsics2.xml";
    cerr << "Attempting to open configuration files" << endl;
    FileStorage fs1(fileName1, FileStorage::READ);
    FileStorage fs2(fileName2, FileStorage::READ);

    Mat cameraMatrix1, cameraMatrix2;
    Mat distCoeffs1, distCoeffs2;

    fs1["Camera_Matrix"] >> cameraMatrix1;
    fs1["Distortion_Coefficients"] >> distCoeffs1;
    fs2["Camera_Matrix"] >> cameraMatrix2;
    fs2["Distortion_Coefficients"] >> distCoeffs2;

    // NOTE(review): on a failed load this only warns and keeps going with
    // empty matrices; stereoCalibrate below would then receive garbage.
    if (cameraMatrix1.data == NULL || distCoeffs1.data == NULL || cameraMatrix2.data == NULL || distCoeffs2.data == NULL)
    {
        cerr << "Could not load camera intrinsics\n" << endl;
    }
    else{
        cerr << "Loaded intrinsics\n" << endl;
        cerr << "Camera Matrix1: " << cameraMatrix1 << endl;
        cerr << "Camera Matrix2: " << cameraMatrix2 << endl;
    }

    Mat translation;
    Mat image1, image2;
    Mat mapX1, mapX2, mapY1, mapY2;
    // Grab one frame up front just to learn the capture resolution.
    inputCapture1.read(image1);
    Size imageSize = image1.size();
    bool rotationCalibrated = false;

    while(inputCapture1.isOpened() && inputCapture2.isOpened())
    {
        inputCapture1.read(image1);
        inputCapture2.read(image2);

        // Once calibrated, display the rectified feeds instead of raw ones.
        if (rotationCalibrated)
        {
            Mat t1 = image1.clone();
            Mat t2 = image2.clone();
            remap(t1, image1, mapX1, mapY1, INTER_LINEAR);
            remap(t2, image2, mapX2, mapY2, INTER_LINEAR);
            t1.release();
            t2.release();
        }

        char c = waitKey(15);
        if (c == 'c')
        {
            cerr << "Cancelling..." << endl;
            return;
        }
        // 's' with a finished calibration: persist everything to XML and quit.
        else if(c == 's' && rotationCalibrated)
        {
            cerr << "Saving..." << endl;
            const string fileName = "EnvironmentCalibration.xml";
            FileStorage fs(fileName, FileStorage::WRITE);
            fs << "Camera_Matrix_1" << getOptimalNewCameraMatrix(cameraMatrix1, distCoeffs1, imageSize, 1,imageSize, 0);
            fs << "Camera_Matrix_2" << getOptimalNewCameraMatrix(cameraMatrix2, distCoeffs2, imageSize, 1, imageSize, 0);
            fs << "Mapping_X_1" << mapX1;
            fs << "Mapping_Y_1" << mapY1;
            fs << "Mapping_X_2" << mapX2;
            fs << "Mapping_Y_2" << mapY2;
            fs << "Translation" << translation;
            cerr << "Exiting..." << endl;
            destroyAllWindows();
            return;
        }
        else if(c == 's' && !rotationCalibrated)
        {
            cerr << "Exiting..." << endl;
            destroyAllWindows();
            return;
        }
        else if (c == 'r')
        {
            BoardSettings s;
            s.boardSize.width = BOARD_WIDTH;
            s.boardSize.height = BOARD_HEIGHT;
            s.cornerNum = s.boardSize.width * s.boardSize.height;
            s.squareSize = (float)SQUARE_SIZE;

            // NOTE(review): this outer objectPoints is shadowed by the
            // vector<vector<Point3f> > declared inside the if-block below;
            // both stereoCalibrate calls use the shadowing (filled) one.
            vector<Point3f> objectPoints;
            vector<vector<Point2f> > imagePoints1, imagePoints2;

            if (RetrieveChessboardCorners(imagePoints1, imagePoints2, s, inputCapture1, inputCapture2, ITERATIONS))
            {
                vector<vector<Point3f> > objectPoints(1);
                CalcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0]);
                objectPoints.resize(imagePoints1.size(),objectPoints[0]);

                Mat R, T, E, F;
                Mat rmat1, rmat2, rvec;
                // First pass: recover the raw relative pose between the cameras.
                double rms = stereoCalibrate(objectPoints, imagePoints1, imagePoints2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E, F, TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01), CV_CALIB_FIX_INTRINSIC);

                cerr << "Original translation: " << T << endl;
                cerr << "Reprojection error reported by camera: " << rms << endl;

                // convert to rotation vector and then remove 90 degree offset
                Rodrigues(R, rvec);
                rvec.at<double>(1,0) -= 1.570796327;

                // equal rotation applied to each image...not necessarily needed
                rvec = rvec/2;
                Rodrigues(rvec, rmat1);
                invert(rmat1,rmat2);

                // Float (CV_32FC1) maps so mapX/mapY can be saved separately.
                initUndistortRectifyMap(cameraMatrix1, distCoeffs1, rmat1, getOptimalNewCameraMatrix(cameraMatrix1, distCoeffs1, imageSize, 1,imageSize, 0), imageSize, CV_32FC1, mapX1, mapY1);
                initUndistortRectifyMap(cameraMatrix2, distCoeffs2, rmat2, getOptimalNewCameraMatrix(cameraMatrix2, distCoeffs2, imageSize, 1, imageSize, 0), imageSize, CV_32FC1, mapX2, mapY2);

                // reproject points in camera 1 since its rotation has been changed
                // need to find the translation between cameras based on the new camera 1 orientation
                for (int i = 0; i < imagePoints1.size(); i++)
                {
                    Mat pointsMat1 = Mat(imagePoints1[i]);
                    Mat pointsMat2 = Mat(imagePoints2[i]);

                    undistortPoints(pointsMat1, imagePoints1[i], cameraMatrix1, distCoeffs1, rmat1,getOptimalNewCameraMatrix(cameraMatrix1, distCoeffs1, imageSize, 1, imageSize, 0));
                    undistortPoints(pointsMat2, imagePoints2[i], cameraMatrix2, distCoeffs2, rmat2,getOptimalNewCameraMatrix(cameraMatrix2, distCoeffs2, imageSize, 1, imageSize, 0));

                    pointsMat1.release();
                    pointsMat2.release();
                }

                Mat temp1, temp2; // NOTE(review): unused
                R.release();
                T.release();
                E.release();
                F.release();

                // TODO: remove this
                // CalcBoardCornerPositions(s.boardSize, s.squareSize, objectPoints[0]);
                // objectPoints.resize(imagePoints1.size(),objectPoints[0]);

                // Second pass on the reprojected points to get the
                // translation in the new camera-1 orientation.
                stereoCalibrate(objectPoints, imagePoints1, imagePoints2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, imageSize, R, T, E, F, TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 1000, 0.01), CV_CALIB_FIX_INTRINSIC);

                // need to alter translation matrix so
                // [0] = distance in X direction (right from perspective of camera 1 is positive)
                // [1] = distance in Y direction (away from camera 1 is positive)
                // [2] = distance in Z direction (up is positive)
                translation = T;
                double temp = -translation.at<double>(0,0);
                translation.at<double>(0,0) = translation.at<double>(2,0);
                translation.at<double>(2,0) = temp;

                cerr << "Translation reproj: " << translation << endl;
                Rodrigues(R, rvec);
                cerr << "Reprojected rvec: " << rvec << endl;

                imagePoints1.clear();
                imagePoints2.clear();
                rvec.release();
                rmat1.release();
                rmat2.release();
                R.release();
                T.release();
                E.release();
                F.release();

                rotationCalibrated = true;
            }
        }
        imshow("Image View1", image1);
        imshow("Image View2", image2);
    }
}
// Runs the interactive stereo-calibration state machine: opens both cameras,
// captures chessboard corner pairs, runs stereo calibration, previews the
// rectified result and optionally saves all parameters.
//
// NOTE(review): the visible portion of this function ends at the closing of
// the switch — the end of the while loop and the function's return statement
// lie outside this excerpt.
int StereoCameraCalibration::handleStereoCalibration(struct StereoCameraCalibration::CalibrationConfigStruct struct_calibrationConfig)
{
    //The capture for the image
    VideoCapture leftCameraCapture;
    VideoCapture rightCameraCapture;
    //Used to signify the capture portion is done
    int quitCapture = 0; // NOTE(review): never used after initialization (in the visible region)
    int lastKey = -1;    // NOTE(review): unused in the visible region
    int key = -1;
    double totalAvgErr;
    double rmsErr;
    CalibrationStates currentCalibrationState = CALIBRATION_STATE_INIT;
    //A vector to hold the points found during calibration
    vector< vector<Point2f> >calibPoints[CAMERA_LOCATION_RIGHT+1];
    //How many frames we have
    int frameCount = 0;
    clock_t prevTimeStamp = clock();
    Mat currentLeftFrame;
    Mat currentRightFrame;
    Mat leftCaptureFrame;
    Mat rightCaptureFrame;
    Mat flippedLeftFrame;
    Mat flippedRightFrame;
    Mat cameraMatrix[2];
    Mat distCoeffs[2];
    Mat R, T, R1, R2, P1, P2, Q;
    Rect validRoi[2];
    Mat rmap[2][2];
    while(currentCalibrationState != CALIBRATION_STATE_DONE)
    {
        key = waitKey(33) & 0xff;
        switch(currentCalibrationState)
        {
            case CALIBRATION_STATE_INIT:
            {
                //Open the captures
                leftCameraCapture.open(struct_calibrationConfig.leftCameraId);
                rightCameraCapture.open(struct_calibrationConfig.rightCameraId);
                // Best-effort camera configuration; failures only warn.
                if(!(leftCameraCapture.set(CV_CAP_PROP_FPS, 30.0)))
                {
                    cout << "Left frame rate set failed" << endl;
                }
                if(!(rightCameraCapture.set(CV_CAP_PROP_FPS, 30.0)))
                {
                    cout << "Right frame rate set failed" << endl;
                }
                if(!(leftCameraCapture.set(CV_CAP_PROP_FRAME_WIDTH, 640)))
                {
                    cout << "Left frame width set failed" << endl;
                }
                if(!(leftCameraCapture.set(CV_CAP_PROP_FRAME_HEIGHT, 480)))
                {
                    cout << "Left frame height set failed" << endl;
                }
                if(!(rightCameraCapture.set(CV_CAP_PROP_FRAME_WIDTH, 640)))
                {
                    cout << "Right frame width set failed" << endl;
                }
                if(!(rightCameraCapture.set(CV_CAP_PROP_FRAME_HEIGHT, 480)))
                {
                    cout << "Right frame height set failed" << endl;
                }
                //Named window for calibration
                namedWindow("Current Left Calibration Image Raw", 1);
                namedWindow("Current Right Calibration Image Raw", 1);
                //Named window for calibration
                namedWindow("Current Left Calibration Image", 1);
                namedWindow("Current Right Calibration Image", 1);
                cout << "Starting calibration feature point capture." << endl;
                currentCalibrationState = CALIBRATION_STATE_SHOW_IMAGE_BEFORE_CAPTURE;
                break;
            }
            case CALIBRATION_STATE_SHOW_IMAGE_BEFORE_CAPTURE:
            {
                // Live mirrored preview until the user starts capture ('s'/space).
                if(leftCameraCapture.isOpened() && rightCameraCapture.isOpened())
                {
                    leftCameraCapture >> leftCaptureFrame;
                    rightCameraCapture >> rightCaptureFrame;
                    leftCaptureFrame.copyTo(currentLeftFrame);
                    rightCaptureFrame.copyTo(currentRightFrame);
                }
                //cout << "Left Frame Size" <<currentLeftFrame.rows <<"x" << currentLeftFrame.cols << endl;
                //cout << "Right Frame Size" <<currentRightFrame.rows <<"x" << currentRightFrame.cols << endl;
                if(!currentLeftFrame.data)
                {
                    cout << "No Frame Data from Left Camera" << endl;
                    return 2;
                }
                if(!currentRightFrame.data)
                {
                    cout << "No Frame Data from Right Camera" << endl;
                    return 2;
                }
                //currentFrame.copyTo(flippedFrame);
                flip(currentLeftFrame, flippedLeftFrame,1);
                flip(currentRightFrame, flippedRightFrame,1);
                imshow("Current Left Calibration Image Raw", flippedLeftFrame);
                imshow("Current Right Calibration Image Raw", flippedRightFrame);
                if(key == 's' || key == ' ')
                {
                    prevTimeStamp = clock();
                    currentCalibrationState = CALIBRATION_STATE_IMAGE_CAPTURE;
                }
                if(key == 27)
                {
                    currentCalibrationState =CALIBRATION_STATE_ABORT;
                }
                break;
            }
            case CALIBRATION_STATE_IMAGE_CAPTURE:
            {
                if(leftCameraCapture.isOpened() && rightCameraCapture.isOpened())
                {
                    leftCameraCapture >> leftCaptureFrame;
                    leftCaptureFrame.copyTo(currentLeftFrame);
                    rightCameraCapture >> rightCaptureFrame;
                    rightCaptureFrame.copyTo(currentRightFrame);
                }
                if(!currentLeftFrame.data)
                {
                    cout << "No Frame Data from Left Camera" << endl;
                    return 2;
                }
                if(!currentRightFrame.data)
                {
                    cout << "No Frame Data from Right Camera" << endl;
                    return 2;
                }
                // These shadow the function-scope flipped frames.
                Mat flippedLeftFrame;
                Mat flippedRightFrame;
                //currentFrame.copyTo(flippedFrame);
                flip(currentLeftFrame, flippedLeftFrame,1);
                flip(currentRightFrame, flippedRightFrame,1);
                // NOTE(review): left/right appear swapped here — the left
                // window shows the flipped RIGHT frame and vice versa;
                // confirm whether this is intentional.
                imshow("Current Left Calibration Image Raw", flippedRightFrame);
                imshow("Current Right Calibration Image Raw", flippedLeftFrame);
                Mat currentFrameGray[CAMERA_LOCATION_RIGHT+1];
                cvtColor(currentLeftFrame,currentFrameGray[CAMERA_LOCATION_LEFT],CV_BGR2GRAY);
                cvtColor(currentRightFrame,currentFrameGray[CAMERA_LOCATION_RIGHT],CV_BGR2GRAY);
                vector<Point2f> currentFramePoints[CAMERA_LOCATION_RIGHT+1];
                bool foundPoints[CAMERA_LOCATION_RIGHT+1];
                //Find the corners of the Chessboard
                // NOTE(review): the detector flags are combined with '&';
                // bitwise-AND of distinct CV_CALIB_CB_* bits evaluates to 0,
                // so no detector options are actually enabled — '|' was
                // almost certainly intended.
                foundPoints[CAMERA_LOCATION_LEFT] = findChessboardCorners( currentFrameGray[CAMERA_LOCATION_LEFT], struct_calibrationConfig.boardSize, currentFramePoints[CAMERA_LOCATION_LEFT], CV_CALIB_CB_ADAPTIVE_THRESH & CV_CALIB_CB_FAST_CHECK & CV_CALIB_CB_NORMALIZE_IMAGE);
                foundPoints[CAMERA_LOCATION_RIGHT] = findChessboardCorners( currentFrameGray[CAMERA_LOCATION_RIGHT], struct_calibrationConfig.boardSize, currentFramePoints[CAMERA_LOCATION_RIGHT], CV_CALIB_CB_ADAPTIVE_THRESH & CV_CALIB_CB_FAST_CHECK & CV_CALIB_CB_NORMALIZE_IMAGE);
                if(foundPoints[CAMERA_LOCATION_LEFT] || foundPoints[CAMERA_LOCATION_RIGHT])
                {
                    // Refine any detected corners to sub-pixel accuracy.
                    if(foundPoints[CAMERA_LOCATION_LEFT])
                    {
                        cornerSubPix( currentFrameGray[CAMERA_LOCATION_LEFT], currentFramePoints[CAMERA_LOCATION_LEFT], Size(5,5), Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
                    }
                    if(foundPoints[CAMERA_LOCATION_RIGHT])
                    {
                        cornerSubPix( currentFrameGray[CAMERA_LOCATION_RIGHT], currentFramePoints[CAMERA_LOCATION_RIGHT], Size(5,5), Size(-1,-1), TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
                    }
                    // Keep the sample only when BOTH views see the board and
                    // the configured delay has passed since the last sample.
                    if(foundPoints[CAMERA_LOCATION_LEFT] && foundPoints[CAMERA_LOCATION_RIGHT])
                    {
                        if(clock() - prevTimeStamp > struct_calibrationConfig.delay*1e-3*CLOCKS_PER_SEC)
                        {
                            prevTimeStamp = clock();
                            //blink = capture.isOpened();
                            // Invert the frames as visual "snapshot" feedback.
                            bitwise_not(currentLeftFrame, currentLeftFrame);
                            bitwise_not(currentRightFrame, currentRightFrame);
                            calibPoints[CAMERA_LOCATION_LEFT].push_back(currentFramePoints[CAMERA_LOCATION_LEFT]);
                            calibPoints[CAMERA_LOCATION_RIGHT].push_back(currentFramePoints[CAMERA_LOCATION_RIGHT]);
                            frameCount++;
                        }
                    }
                    if(foundPoints[CAMERA_LOCATION_LEFT])
                    {
                        drawChessboardCorners( currentLeftFrame, struct_calibrationConfig.boardSize, Mat(currentFramePoints[CAMERA_LOCATION_LEFT]), foundPoints[CAMERA_LOCATION_LEFT] );
                    }
                    if(foundPoints[CAMERA_LOCATION_RIGHT])
                    {
                        drawChessboardCorners( currentRightFrame, struct_calibrationConfig.boardSize, Mat(currentFramePoints[CAMERA_LOCATION_RIGHT]), foundPoints[CAMERA_LOCATION_RIGHT] );
                    }
                    cout << "Good Frames: " << frameCount << endl;
                    imshow("Current Left Calibration Image", currentLeftFrame);
                    imshow("Current Right Calibration Image", currentRightFrame);
                    struct_calibrationConfig.imageSize = currentLeftFrame.size();
                }
                else
                {
                    imshow("Current Left Calibration Image", currentFrameGray[CAMERA_LOCATION_LEFT]);
                    imshow("Current Right Calibration Image", currentFrameGray[CAMERA_LOCATION_RIGHT]);
                }
                // Require at least 15 good stereo pairs before running calibration.
                if((key == 'd' || key == 'c' || key == 'r' || key == ' ') && frameCount >= 15)
                {
                    currentCalibrationState =CALIBRATION_STATE_RUN;
                }
                if(key==27)
                {
                    currentCalibrationState =CALIBRATION_STATE_ABORT;
                }
                break;
            }
            case CALIBRATION_STATE_RUN:
            {
                bool ok = runStereoCalibration(calibPoints, struct_calibrationConfig.imageSize, struct_calibrationConfig.boardSize, struct_calibrationConfig.squareSize, struct_calibrationConfig.aspectRatio, struct_calibrationConfig.flags, cameraMatrix, distCoeffs, R, T, R1, P1, R2, P2, Q, validRoi, rmsErr, totalAvgErr);
                printf("%s. avg reprojection error = %.2f\n", ok ? "Calibration succeeded" : "Calibration failed", totalAvgErr);
                if(ok)
                {
                    cout << "Moving to save option." << endl;
                    //Precompute maps for cv::remap()
                    initUndistortRectifyMap(cameraMatrix[CAMERA_LOCATION_LEFT], distCoeffs[CAMERA_LOCATION_LEFT], R1, P1, struct_calibrationConfig.imageSize, CV_16SC2, rmap[CAMERA_LOCATION_LEFT][CAMERA_REMAP_AXIS_X], rmap[CAMERA_LOCATION_LEFT][CAMERA_REMAP_AXIS_Y]);
                    initUndistortRectifyMap(cameraMatrix[CAMERA_LOCATION_RIGHT], distCoeffs[CAMERA_LOCATION_RIGHT], R2, P2, struct_calibrationConfig.imageSize, CV_16SC2, rmap[CAMERA_LOCATION_RIGHT][CAMERA_REMAP_AXIS_X], rmap[CAMERA_LOCATION_RIGHT][CAMERA_REMAP_AXIS_Y]);
                    currentCalibrationState =CALIBRATION_STATE_SAVE;
                }
                else
                {
                    cout << "Moving to waiting for image capture." << endl;
                    currentCalibrationState =CALIBRATION_STATE_SHOW_IMAGE_BEFORE_CAPTURE;
                }
                break;
            }
            case CALIBRATION_STATE_SAVE:
            {
                // Preview the rectified pair; 's' persists, ESC aborts.
                key = displayRemappedStereoImages(leftCameraCapture, rightCameraCapture,validRoi,struct_calibrationConfig.imageSize,rmap);
                bool ok = false;
                vector<vector<Point2f> > noPoints[2];
                if( key == 's' )
                {
                    // Cache all calibration results on this object, then save.
                    this->imageSize = struct_calibrationConfig.imageSize;
                    this->boardSize = struct_calibrationConfig.boardSize;
                    this->squareSize = struct_calibrationConfig.squareSize;
                    this ->aspectRatio = struct_calibrationConfig.aspectRatio;
                    this->flags = struct_calibrationConfig.flags;
                    this->cameraMatrix[0] = cameraMatrix[0].clone();
                    this->cameraMatrix[1] = cameraMatrix[1].clone();
                    this->distCoeffs[0] = distCoeffs[0].clone();
                    this->distCoeffs[1] = distCoeffs[1].clone();
                    this->R = R.clone();
                    this->T = T.clone();
                    this->R1 = R1.clone();
                    this->P1 = P1.clone();
                    this->R2 = R2.clone();
                    this->P2 = P2.clone();
                    this->Q = Q.clone();
                    this->validRoi[0] = validRoi[0];
                    this->validRoi[1] = validRoi[1];
                    this->rmsErr = rmsErr;
                    this->imagePoints[0] = calibPoints[0];
                    this->imagePoints[1] = calibPoints[1];
                    this->totalAvgErr = totalAvgErr;
                    saveStereoCameraParams( struct_calibrationConfig.outputFilename, struct_calibrationConfig.imageSize, struct_calibrationConfig.boardSize, struct_calibrationConfig.squareSize, struct_calibrationConfig.aspectRatio, struct_calibrationConfig.flags, cameraMatrix, distCoeffs, R, T, R1, P1, R2, P2, Q,validRoi, rmsErr, struct_calibrationConfig.writePoints ? calibPoints : noPoints, totalAvgErr );
                    cout << "Stereo Calibration Data Saved" << endl;
                    currentCalibrationState =CALIBRATION_STATE_COMPLETE;
                }
                if(key == 27)
                {
                    cout << "Move to abort" << endl;
                    currentCalibrationState =CALIBRATION_STATE_ABORT;
                }
                break;
            }
            case CALIBRATION_STATE_ABORT:
                cout << "Calibration Aborted" << endl;
                currentCalibrationState =CALIBRATION_STATE_COMPLETE;
                break;
            case CALIBRATION_STATE_COMPLETE:
                cout << "Calibration Completed" << endl;
                currentCalibrationState =CALIBRATION_STATE_DONE;
                break;
            default:
                break;
        }//switch
// Runs the full single-camera calibration workflow driven by "default.xml":
// detects the configured pattern in incoming frames, collects image points,
// calibrates, and offers undistorted previews.
// (cameraMatrix and the Settings object 's' are not declared locally —
// presumably class members; confirm against the class header.)
void CameraCalibration::calibrate()
{
    const string inputSettingsFile = "default.xml";
    // Read the settings
    FileStorage fs( inputSettingsFile, FileStorage::READ );
    if ( !fs.isOpened() )
    {
        // Create an empty settings file as a template, then bail out.
        FileStorage fs( inputSettingsFile, FileStorage::WRITE );
        fs.release();
        cerr << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
        return;
    }
    else
    {
        s.read( fs["Settings"] );
        // close Settings file
        fs.release();
    }
    if ( !s.goodInput )
    {
        cerr << "Invalid input detected. Application stopping." << endl;
        return;
    }
    vector<vector<Point2f> > imagePoints;
    Mat distCoeffs;
    Size imageSize;
    // Image lists start capturing immediately; live cameras wait for 'g'.
    int mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION;
    clock_t prevTimestamp = 0;
    const Scalar RED( 0, 0, 255 ), GREEN( 0, 255, 0 );
    const char ESC_KEY = 27;
    for ( int i = 0; ; ++i )
    {
        Mat view;
        bool blinkOutput = false;
        view = s.nextImage();
        //----- If no more image, or got enough, then stop calibration and show result -------------
        if ( mode == CAPTURING && imagePoints.size() >= (unsigned)s.nrFrames )
        {
            if ( runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints ) )
            {
                mode = CALIBRATED;
            }
            else
            {
                mode = DETECTION;
            }
        }
        // If no more images then run calibration, save and stop loop.
        if ( view.empty() )
        {
            if ( imagePoints.size() > 0 )
            {
                runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints);
            }
            break;
        }
        imageSize = view.size();
        // Format input image.
        if ( s.flipVertical )
        {
            flip( view, view, 0 );
        }
        vector<Point2f> pointBuf;
        bool found;
        // Find feature points on the input format
        switch ( s.calibrationPattern )
        {
        case Settings::CHESSBOARD:
            found = findChessboardCorners( view, s.boardSize, pointBuf, CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK | CALIB_CB_NORMALIZE_IMAGE);
            break;
        case Settings::CIRCLES_GRID:
            found = findCirclesGrid( view, s.boardSize, pointBuf );
            break;
        case Settings::ASYMMETRIC_CIRCLES_GRID:
            found = findCirclesGrid( view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID );
            break;
        default:
            found = false;
            break;
        }
        // If done with success, improve the found corners' coordinate accuracy for chessboard
        if ( found )
        {
            if ( s.calibrationPattern == Settings::CHESSBOARD )
            {
                Mat viewGray;
                cvtColor( view, viewGray, COLOR_BGR2GRAY );
                cornerSubPix( viewGray, pointBuf, Size( 11,11 ), Size(-1,-1), TermCriteria( TermCriteria::EPS + TermCriteria::MAX_ITER, 30, 0.1 ) );
            }
            // For camera only take new samples after delay time
            if ( mode == CAPTURING && (!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC) )
            {
                imagePoints.push_back( pointBuf );
                prevTimestamp = clock();
                blinkOutput = s.inputCapture.isOpened();
            }
            // Draw the corners.
            drawChessboardCorners( view, s.boardSize, Mat( pointBuf ), found );
        }
        //----------------------------- Output Text ------------------------------------------------
        string msg = ( mode == CAPTURING ) ? "100/100" : mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
        int baseLine = 0;
        Size textSize = getTextSize( msg, 1, 1, 1, &baseLine );
        Point textOrigin( view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10 );
        if ( mode == CAPTURING )
        {
            if ( s.showUndistorsed )
            {
                msg = format( "%d/%d Undist", (int)imagePoints.size(), s.nrFrames );
            }
            else
            {
                msg = format( "%d/%d", (int)imagePoints.size(), s.nrFrames );
            }
        }
        putText( view, msg, textOrigin, 1, 1, mode == CALIBRATED ? GREEN : RED );
        // Flash inverted colors as feedback when a live sample was taken.
        if ( blinkOutput )
        {
            bitwise_not( view, view );
        }
        //------------------------- Video capture output undistorted ------------------------------
        if ( mode == CALIBRATED && s.showUndistorsed )
        {
            Mat temp = view.clone();
            undistort( temp, view, cameraMatrix, distCoeffs );
        }
        //------------------------------ Show image and check for input commands -------------------
        imshow( "Image View", view );
        char key = (char)waitKey( s.inputCapture.isOpened() ? 50 : s.delay );
        if ( key == ESC_KEY )
        {
            break;
        }
        if ( key == 'u' && mode == CALIBRATED )
        {
            s.showUndistorsed = !s.showUndistorsed;
        }
        if ( s.inputCapture.isOpened() && key == 'g' )
        {
            mode = CAPTURING;
            imagePoints.clear();
        }
    }
    // -----------------------Show the undistorted image for the image list ------------------------
    if ( s.inputType == Settings::IMAGE_LIST && s.showUndistorsed )
    {
        Mat view, rview, map1, map2;
        initUndistortRectifyMap( cameraMatrix, distCoeffs, Mat(), getOptimalNewCameraMatrix( cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0 ), imageSize, CV_16SC2, map1, map2 );
        for ( int i = 0; i < (int)s.imageList.size(); i++ )
        {
            view = imread( s.imageList[i], 1 );
            if ( view.empty() )
            {
                continue;
            }
            remap( view, rview, map1, map2, INTER_LINEAR );
            imshow( "Image View", rview );
            char c = (char)waitKey();
            if ( c == ESC_KEY || c == 'q' || c == 'Q' )
            {
                break;
            }
        }
    }
}
// Interactively calibrates a single camera from live video.
// Opens pParam->camera_index (falling back to the default camera),
// detects an 8x6 chessboard in each frame and lets the user accept or
// reject each detection ('c' = reject, 'q' = abort, 's' = keep and stop
// collecting), then runs runCalibration() and previews the undistorted
// live stream.
// Returns runCalibration()'s result (0 on success), 0 on user abort,
// or -1 when no camera could be opened / no board was collected.
int main_camera(Parameter *pParam)
{
    Size boardSize(8, 6);
    Size imageSize;
    int flags = CV_CALIB_FIX_ASPECT_RATIO;
    float squareSize = pParam->square_size;
    float aspectRatio = 1.f;
    Mat cameraMatrix;
    Mat distCoeffs;
    Mat frame;
    VideoCapture video;
    int flag_finish = 0;
    int result = 0;

    vector<vector<Point2f> > imagePointsSet;
    vector<Point2f> imagePoints;
    imagePointsSet.clear();

    video.open(pParam->camera_index);
    if (video.isOpened() != true)
    {
        printf("fail to open camera %d\n", pParam->camera_index);
        video.open(-1);
        if (video.isOpened() != true)
        {
            printf("fail to open the default camera, please make sure an accessible camera is connected \n");
            return -1;
        }
        else
        {
            printf("open the default camera\n");
        }
    }

    while (flag_finish == 0)
    {
        Mat framecv;
        int found = 0;
        video >> frame;
        // VideoCapture delivers BGR frames, so BGR2GRAY is the correct
        // conversion (the original used CV_RGB2GRAY, which swaps the
        // red/blue channel weights).
        cvtColor(frame, framecv, CV_BGR2GRAY);
        imshow("framecv", framecv); // for observing input
        waitKey(10);
        if (framecv.cols <= 0 || framecv.rows <= 0 || framecv.data == NULL)
        {
            printf("finish chess board detection \n");
            break;
        }
        imagePoints.clear();
        imageSize.width = framecv.cols;
        imageSize.height = framecv.rows;
        found = findChessboardCorners(framecv, boardSize, imagePoints,
                                      CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
        if (found)
        {
            // Refine corner locations to sub-pixel accuracy before storing.
            cornerSubPix(framecv, imagePoints, Size(11, 11), Size(-1, -1),
                         TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
            char key = 0;
            drawChessboardCorners(framecv, boardSize, Mat(imagePoints), found);
            imshow("framecv_xx", framecv);
            key = waitKey(0);
            if (key == 'c' || key == 'C')       // not correct: reject this detection
                continue;
            else if (key == 'q' || key == 'Q')  // abort completely
                return 0;
            else if (key == 's' || key == 'S')  // keep it and stop collecting
                flag_finish = 1;
            printf("get a new chess board input\n");
            imagePointsSet.push_back(imagePoints);
        }
        else
        {
            printf("no found usable chess board\n");
        }
    }

    // Robustness: calibrating on an empty point set is meaningless and
    // would make runCalibration() fail or crash.
    if (imagePointsSet.empty())
    {
        printf("no chess board detections collected, calibration skipped\n");
        return -1;
    }

    // calibrate cameras
    {
        vector<Mat> rvecs, tvecs;
        vector<float> reprojErrs;
        double totalAvgErr = 0;
        result = runCalibration(imagePointsSet, imageSize, boardSize, CHESSBOARD, squareSize,
                                aspectRatio, flags, cameraMatrix, distCoeffs,
                                rvecs, tvecs, reprojErrs, totalAvgErr);
    }

    // test calibrate: preview the undistorted live stream until 's'/'q'.
    {
        Mat view, rview, map1, map2;
        initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
                                getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
                                imageSize, CV_16SC2, map1, map2);
        while (1)
        {
            char key = 0;
            video >> view;
            remap(view, rview, map1, map2, INTER_LINEAR);
            imshow("rview", rview);
            key = waitKey(0);
            if (key == 's')
                break;
            else if (key == 'q')
                break;
        }
    }

    // The original fell off the end of a non-void function (undefined
    // behaviour); report the calibration result instead.
    return result;
}
// Builds the stereo rectification remap tables remapM[2][2] from the raw
// calibration arrays stored in calibData.  If the stored R_rect_0X /
// P_rect_0X entries look plausible they are reused directly; otherwise
// stereoRectify() recomputes them (and the valid ROIs) and the results
// are written back into calibData.
void RectifyStereo::calRemapMatrix(){
    Mat K_00, D_00, R_rect_00, P_rect_00;
    Mat K_01, D_01, R_rect_01, P_rect_01;
    Size S_00;
    Mat R_01, T_01; // Matlab computes these
    // Wrap the raw double arrays in calibData as Mat headers — no copy,
    // the Mats alias calibData's storage.
    K_00 = Mat(SQRT_SIZE_K, SQRT_SIZE_K, CV_64FC1, calibData.K_00);
    D_00 = Mat(1, SIZE_D, CV_64FC1, calibData.D_00);
    K_01 = Mat(SQRT_SIZE_K, SQRT_SIZE_K, CV_64FC1, calibData.K_01);
    D_01 = Mat(1, SIZE_D, CV_64FC1, calibData.D_01);
    S_00 = Size(calibData.S_00[0], calibData.S_00[1]);
    R_01 = Mat(3, 3, CV_64FC1, calibData.R_01);
    T_01 = Mat(3, 1, CV_64FC1, calibData.T_01);
    // Check if R_rect_0X, P_rect_0X are valid.
    // Heuristic: any entry with magnitude > 5000 means the stored
    // rectification data is garbage/uninitialized.
    bool checkRP = true;
    for (int i = 0; i < SIZE_R && checkRP; i++)
    {
        if (abs(calibData.R_rect_00[i]) > 5000 || abs(calibData.R_rect_01[i]) > 5000)
            checkRP = false;
    }
    for (int i = 0; i < SIZE_P_ROWS * SIZE_P_COLS && checkRP; i++)
    {
        if (abs(calibData.P_rect_00[i]) > 5000 || abs(calibData.P_rect_01[i]) > 5000)
            checkRP = false;
    }
    Size S_rect_00;
    // If valid, read them directly
    if (checkRP == true)
    {
        R_rect_00 = Mat(SQRT_SIZE_R, SQRT_SIZE_R, CV_64FC1, calibData.R_rect_00);
        P_rect_00 = Mat(SIZE_P_ROWS, SIZE_P_COLS, CV_64FC1, calibData.P_rect_00);
        R_rect_01 = Mat(SQRT_SIZE_R, SQRT_SIZE_R, CV_64FC1, calibData.R_rect_01);
        P_rect_01 = Mat(SIZE_P_ROWS, SIZE_P_COLS, CV_64FC1, calibData.P_rect_01);
        S_rect_00 = Size(calibData.S_rect_00[0], calibData.S_rect_00[1]);
        //int _y = calibData.ROI_00[1];
        //if (_y < calibData.ROI_01[1])
        //	_y = calibData.ROI_01[1];
        //int _y2 = calibData.ROI_00[1] + calibData.ROI_00[3];
        //if (_y2 > calibData.ROI_01[1] + calibData.ROI_01[3])
        //	_y2 = calibData.ROI_01[1] + calibData.ROI_01[3];
        //int _width = calibData.ROI_00[2];
        //if (_width > calibData.ROI_01[2])
        //	_width = calibData.ROI_01[2];
        //roi[0] = Rect(calibData.ROI_00[0], _y, _width, _y2 - _y);
        //roi[1] = Rect(calibData.ROI_01[0], _y, _width, _y2 - _y);
    }
    // If not, compute them
    else
    {
        S_rect_00 = S_00;
        Mat Q;
        // Rectify and compute the rest camera parameters
        stereoRectify(K_00, D_00, K_01, D_01, S_00, R_01, T_01, //input
            R_rect_00, R_rect_01, P_rect_00, P_rect_01, // output
            Q, CV_CALIB_ZERO_DISPARITY, 0, S_rect_00, &roi[0], &roi[1]); //output
        // Persist the computed rectification back into calibData's
        // fixed-size arrays (stereoRectify allocated fresh CV_64F Mats,
        // so the aliasing above no longer holds for these outputs).
        memcpy(calibData.R_rect_00, (double*)R_rect_00.data, sizeof(calibData.R_rect_00));
        memcpy(calibData.P_rect_00, (double*)P_rect_00.data, sizeof(calibData.P_rect_00));
        memcpy(calibData.R_rect_01, (double*)R_rect_01.data, sizeof(calibData.R_rect_01));
        memcpy(calibData.P_rect_01, (double*)P_rect_01.data, sizeof(calibData.P_rect_01));
        calibData.ROI_00[0] = roi[0].x, calibData.ROI_00[1] = roi[0].y;
        calibData.ROI_00[2] = roi[0].width, calibData.ROI_00[3] = roi[0].height;
        calibData.ROI_01[0] = roi[1].x, calibData.ROI_01[1] = roi[1].y;
        calibData.ROI_01[2] = roi[1].width, calibData.ROI_01[3] = roi[1].height;
        calibData.S_rect_00[0] = S_rect_00.width, calibData.S_rect_00[1] = S_rect_00.height;
        calibData.S_rect_01[0] = S_rect_00.width, calibData.S_rect_01[1] = S_rect_00.height;
    }
    if (!rectified)
    {
        //compute remapM[2][2]
        // Float (CV_32FC1) maps: remapM[cam][0] = x-map, remapM[cam][1] = y-map.
        initUndistortRectifyMap(
            K_00, D_00, R_rect_00, P_rect_00,
            S_rect_00, CV_32FC1,
            remapM[0][0], remapM[0][1]);
        initUndistortRectifyMap(
            K_01, D_01, R_rect_01, P_rect_01,
            S_rect_00, CV_32FC1,
            remapM[1][0], remapM[1][1]);
    }
}
// Worker-thread entry point: runs chessboard (or circle-grid)
// calibration over the image files in imgList, reporting progress and
// preview bitmaps through Qt signals (appendText / showBitmap /
// popupErrorInformation / closeImageWindow) and finally writing the
// calibration results via runAndSave() to strFileName.
// Board geometry comes from m_width / m_height / m_squaresize.
void CalibrateThread::run()
{
    Size boardSize, imageSize;
    float squareSize = 1.f, aspectRatio = 1.f;
    Mat cameraMatrix, distCoeffs;
    //QString of = ui->lineEdit_WorkFolder->text() + '/' + ui->lineEdit_OutputName->text();
    // NOTE: outputFilename points into ba's buffer — ba must outlive it.
    QByteArray ba = strFileName.toLatin1();
    const char* outputFilename = ba.data();
    int i, nframes = 0;
    bool writeExtrinsics = true, writePoints = true;
    bool undistortImage = false;
    int flags = 0;
    VideoCapture capture;           // never opened here — file-list mode only
    bool flipVertical = false;
    bool showUndistorted = false;
    int delay = 1000;
    clock_t prevTimestamp = 0;
    int mode = CAPTURING;
    vector<vector<Point2f> > imagePoints;
    vector<string> imageList;
    Pattern pattern = CHESSBOARD;
    boardSize.width = m_width;
    boardSize.height = m_height;
    squareSize = m_squaresize;
    //ui->textEdit_Information->append("\nCalibrating... Please wait for a while\n");
    if( imgList.size() == 0 )
    {
        //QMessageBox::warning(NULL, "Error", "Please choose a right folder");
        emit popupErrorInformation("Please choose a right folder");
        emit closeImageWindow();
        return;
    }
    else
    {
        nframes = imgList.size();
    }
    emit appendText("\nCalibrating... Please wait for a while\n");
    //namedWindow( "Image View", 1 );
    //bDialog->show();
    for(i = 0; i < nframes ;i++)
    {
        //ui->textEdit_Information->append("Processing the image No. " + QString::number(i + 1));
        emit appendText("Processing the image No. " + QString::number(i + 1));
        Mat view, viewGray;
        bool blink = false;
        qDebug(imgList.at(i).toLatin1().data());
        if( i < (int)imgList.size() )
            view = imread(imgList.at(i).toLatin1().data(), 1);
        if(!view.data)
        {
            //QMessageBox::warning(NULL, "Error", );
            emit popupErrorInformation("Could not open image files");
            return;
        }
        imageSize = view.size();
        if( flipVertical )
            flip( view, view, 0 );
        vector<Point2f> pointbuf;
        cvtColor(view, viewGray, CV_BGR2GRAY);
        bool found;
        // Detect the configured pattern on the color image.
        switch( pattern )
        {
        case CHESSBOARD:
            found = findChessboardCorners( view, boardSize, pointbuf,
                CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
            break;
        case CIRCLES_GRID:
            found = findCirclesGrid( view, boardSize, pointbuf );
            break;
        case ASYMMETRIC_CIRCLES_GRID:
            found = findCirclesGrid( view, boardSize, pointbuf, CALIB_CB_ASYMMETRIC_GRID );
            break;
        }
        // improve the found corners' coordinate accuracy
        if( pattern == CHESSBOARD && found)
            cornerSubPix( viewGray, pointbuf, Size(11,11), Size(-1,-1),
                TermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1 ));
        // capture.isOpened() is always false here, so every found board
        // is accepted (the delay test is for live-camera use).
        if( mode == CAPTURING && found &&
            (!capture.isOpened() || clock() - prevTimestamp > delay*1e-3*CLOCKS_PER_SEC) )
        {
            imagePoints.push_back(pointbuf);
            prevTimestamp = clock();
            blink = capture.isOpened();
        }
        if(found)
            drawChessboardCorners( view, boardSize, Mat(pointbuf), found );
        // Overlay progress text in the bottom-right corner.
        string msg = mode == CAPTURING ? "100/100" : mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
        int baseLine = 0;
        Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
        Point textOrigin(view.cols - 2*textSize.width - 10, view.rows - 2*baseLine - 10);
        if( mode == CAPTURING )
        {
            if(undistortImage)
                msg = format( "%d/%d Undist", (int)imagePoints.size(), nframes );
            else
                msg = format( "%d/%d", (int)imagePoints.size(), nframes );
        }
        putText( view, msg, textOrigin, 1, 1, mode != CALIBRATED ? Scalar(0,0,255) : Scalar(0,255,0));
        if( blink )
            bitwise_not(view, view);
        if( mode == CALIBRATED && undistortImage )
        {
            Mat temp = view.clone();
            undistort(temp, view, cameraMatrix, distCoeffs);
        }
        // Convert the annotated frame to a QImage and hand it to the GUI.
        Mat rgb;
        cvtColor(view, rgb, CV_BGR2RGB);
        QImage image32 = QImage(rgb.cols, rgb.rows, QImage::Format_RGB32);
        QRgb value;
        for(int r = 0; r < rgb.rows; r++)
        {
            for(int c = 0; c < rgb.cols; c++)
            {
                value = qRgb(rgb.ptr<uchar>(0)[r * rgb.cols * 3 + c * 3 + 0],
                             rgb.ptr<uchar>(0)[r * rgb.cols * 3 + c * 3 + 1],
                             rgb.ptr<uchar>(0)[r * rgb.cols * 3 + c * 3 + 2]);
                image32.setPixel(c, r, value);
            }
        }
        emit showBitmap(image32);
        int key;
        if(i < nframes - 1)
        {
            key = 0xff & waitKey(500);
        }
        else
        {
            key = waitKey(500);
        }
        if( (key & 255) == 27 )     // ESC aborts the processing loop
            break;
        if( key == 'u' && mode == CALIBRATED )
            undistortImage = !undistortImage;
    }
    if(imagePoints.size() > 0)
    {
        emit appendText("\n" + QString::number(imagePoints.size()) + " out of " + QString::number(nframes) + " images are effective!\n" );
        runAndSave(outputFilename, imagePoints, imageSize,
                   boardSize, pattern, squareSize, aspectRatio,
                   flags, cameraMatrix, distCoeffs,
                   writeExtrinsics, writePoints);
    }
    else
    {
        emit appendText("Calibrating is not successful! \nPlease change the parameters and try again!");
        emit popupErrorInformation("Sorry, no enough points are detected! Please try another group of images!");
        emit closeImageWindow();
        return;
    }
    emit appendText("Calibrating Successfully! \nPlease go to the folder to check the out put files!");
    emit closeImageWindow();
    // Dead in practice: showUndistorted is hard-coded false above and
    // imageList is never filled — kept from the original sample code.
    if( !capture.isOpened() && showUndistorted )
    {
        Mat view, rview, map1, map2;
        initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
                                getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
                                imageSize, CV_16SC2, map1, map2);
        for( i = 0; i < (int)imageList.size(); i++ )
        {
            view = imread(imageList[i], 1);
            if(!view.data)
                continue;
            //undistort( view, rview, cameraMatrix, distCoeffs, cameraMatrix );
            remap(view, rview, map1, map2, INTER_LINEAR);
            imshow("Image View", rview);
            int c = 0xff & waitKey();
            if( (c & 255) == 27 || c == 'q' || c == 'Q' )
                break;
        }
    }
    return;
}
// Calibrates a single camera from a list of image files.
// pParam->image_list names a text file whose entries are enumerated by
// getFileName(); each image is searched for an 8x6 chessboard, the
// collected corner sets are fed to runCalibration(), the undistorted
// images are previewed, and on success the intrinsics are written with
// save_result() to pParam->output_path.
// Returns runCalibration()'s result (0 on success) or -1 when no
// usable board was found.
int main_file(Parameter *pParam)
{
    char *file_list = pParam->image_list; //"/home/sean/Pictures/calib_test1/dir.txt";
    Size boardSize(8, 6);
    Size imageSize;
    int flags = CV_CALIB_FIX_ASPECT_RATIO;
    float squareSize = pParam->square_size;
    float aspectRatio = 1.f;
    Mat cameraMatrix;
    Mat distCoeffs;
    int result = 0;

    // read frames from data file
    vector<vector<Point2f> > imagePointsSet;
    vector<Point2f> imagePoints;
    vector<string> fileNames;
    fileNames.clear();
    imagePointsSet.clear();
    getFileName(file_list, fileNames);

    for (size_t i = 0; i < fileNames.size(); i++)   // size_t: no signed/unsigned mismatch
    {
        Mat framecv = imread(fileNames[i].c_str(), 0);  // load as grayscale
        if (framecv.cols <= 0 || framecv.rows <= 0 || framecv.data == NULL)
        {
            printf("finish chess board detection \n");
            break;
        }
        imagePoints.clear();
        imageSize.width = framecv.cols;
        imageSize.height = framecv.rows;
        int found = findChessboardCorners(framecv, boardSize, imagePoints,
                                          CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FAST_CHECK | CV_CALIB_CB_NORMALIZE_IMAGE);
        if (found)
        {
            // Refine to sub-pixel accuracy and show the detection briefly.
            cornerSubPix(framecv, imagePoints, Size(11, 11), Size(-1, -1),
                         TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
            drawChessboardCorners(framecv, boardSize, Mat(imagePoints), found);
            imshow("framecv_xx", framecv);
            waitKey(10);
            imagePointsSet.push_back(imagePoints);
        }
    }

    // Robustness: runCalibration() on an empty point set is meaningless.
    if (imagePointsSet.empty())
    {
        printf("no usable chess board found in the image list\n");
        return -1;
    }

    // calibrate cameras
    {
        vector<Mat> rvecs, tvecs;
        vector<float> reprojErrs;
        double totalAvgErr = 0;
        result = runCalibration(imagePointsSet, imageSize, boardSize, CHESSBOARD, squareSize,
                                aspectRatio, flags, cameraMatrix, distCoeffs,
                                rvecs, tvecs, reprojErrs, totalAvgErr);
    }

    // test calibrate: preview each input image undistorted.
    {
        Mat view, rview, map1, map2;
        initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
                                getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
                                imageSize, CV_16SC2, map1, map2);
        for (size_t i = 0; i < fileNames.size(); i++)
        {
            view = imread(fileNames[i].c_str());
            if (view.empty())   // guard: remap on an empty Mat would throw
                continue;
            remap(view, rview, map1, map2, INTER_LINEAR);
            imshow("rview", rview);
            waitKey(0);
        }
    }

    // save
    if (result == 0)
    {
        save_result(pParam->output_path, cameraMatrix, distCoeffs);
    }
    // The original fell off the end of a non-void function (undefined
    // behaviour); report the calibration result.
    return result;
}
// Calibrates both cameras individually, then stereo-calibrates and
// rectifies the pair: fills calib_params[0/1] (intrinsics/distortion),
// stereo_params (R, T, E, F) and rect_params (rotations, projections,
// Q, ROIs and the CV_16SC2 rectification remap tables).
// Exits the whole process if any input image lacks a chessboard.
void CameraCalibration::StereoCalibration()
{
    vector<vector<Point2f> > ImagePoints[2];
    vector<vector<Point3f> > ObjectPoints(1);
    // Physical board corner positions on the z = 0 plane, built once and
    // then replicated for every frame.
    for(int i=0; i<BoardSize.height; i++)
        for(int j=0; j<BoardSize.width; j++)
            ObjectPoints.at(0).push_back(Point3f(float( j*SquareSize ), float( i*SquareSize ), 0));
    ObjectPoints.resize(NumFrames, ObjectPoints[0]);
    vector<Mat> RVecs[2], TVecs[2];
    double rms;
    // Per-camera intrinsic calibration (c_idx 0 = left, 1 = right —
    // assumed from usage below; confirm against calib_params layout).
    for(int c_idx=0; c_idx<2; c_idx++)
    {
        for(int i=0; i < NumFrames; i++)
        {
            Mat img = imread(data_path+"/"+calib_params[c_idx].ImageList.at(i), CV_LOAD_IMAGE_COLOR);
            vector<Point2f> pointBuf;
            bool found = false;
            found = findChessboardCorners(img, BoardSize, pointBuf,
                                          CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
            if(found)
            {
                Mat viewGray;
                cvtColor(img, viewGray, CV_BGR2GRAY);
                cornerSubPix(viewGray, pointBuf, Size(11, 11), Size(-1, -1),
                             TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 100, 0.01));
                //drawChessboardCorners(img, BoardSize, Mat(pointBuf), found);
                //namedWindow("Image View", CV_WINDOW_AUTOSIZE);
                //imshow("Image View", img);
                //waitKey();
            }
            else
            {
                cerr << i << "th image cannot be found a pattern." << endl;
                exit(EXIT_FAILURE);
            }
            ImagePoints[c_idx].push_back(pointBuf);
        }
        // Seed the intrinsics, then refine with the rational model
        // disabled beyond k2 (K3..K6 fixed).
        calib_params[c_idx].DistCoeffs = Mat::zeros(8, 1, CV_64F);
        calib_params[c_idx].CameraMatrix = initCameraMatrix2D(ObjectPoints, ImagePoints[c_idx], ImageSize, 0);
        rms = calibrateCamera(ObjectPoints, ImagePoints[c_idx], ImageSize,
                              calib_params[c_idx].CameraMatrix, calib_params[c_idx].DistCoeffs,
                              RVecs[c_idx], TVecs[c_idx],
                              CV_CALIB_USE_INTRINSIC_GUESS |
                              CV_CALIB_FIX_K3 | CV_CALIB_FIX_K4 | CV_CALIB_FIX_K5 | CV_CALIB_FIX_K6);
        cout << c_idx << " camera re-projection error reported by calibrateCamera: "<< rms << endl;
    }
    // Joint stereo calibration using the per-camera intrinsics as input.
    rms = stereoCalibrate(ObjectPoints, ImagePoints[0], ImagePoints[1],
                          calib_params[0].CameraMatrix, calib_params[0].DistCoeffs,
                          calib_params[1].CameraMatrix, calib_params[1].DistCoeffs,
                          ImageSize, stereo_params->R, stereo_params->T,
                          stereo_params->E, stereo_params->F,
                          TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5));
    cout << "Stereo re-projection error reported by stereoCalibrate: " << rms << endl;
    cout << "Fundamental Matrix reprojection error: " << FundamentalMatrixQuality(
        ImagePoints[0], ImagePoints[1],
        calib_params[0].CameraMatrix, calib_params[1].CameraMatrix,
        calib_params[0].DistCoeffs, calib_params[1].DistCoeffs, stereo_params->F) << endl;
    // Transfer matrix from OpenCV Mat to Pangolin matrix
    CvtCameraExtrins(RVecs, TVecs);
    Timer PangolinTimer;
    // Stereo rectification
    stereoRectify(calib_params[0].CameraMatrix, calib_params[0].DistCoeffs,
                  calib_params[1].CameraMatrix, calib_params[1].DistCoeffs,
                  ImageSize, stereo_params->R, stereo_params->T,
                  rect_params->LeftRot, rect_params->RightRot,
                  rect_params->LeftProj, rect_params->RightProj,
                  rect_params->Disp2DepthReProjMat,
                  CALIB_ZERO_DISPARITY, // test later
                  1, // test later
                  ImageSize,
                  &rect_params->LeftRoi, &rect_params->RightRoi);
    cout << "\nStereo rectification using calibration spent: " << PangolinTimer.getElapsedTimeInMilliSec() << "ms." << endl;
    // Vertical rig if the rectified baseline is along y rather than x.
    rect_params->isVerticalStereo = fabs(rect_params->RightProj.at<double>(1, 3)) >
                                    fabs(rect_params->RightProj.at<double>(0, 3));
    // Get the rectification re-map index
    initUndistortRectifyMap(calib_params[0].CameraMatrix, calib_params[0].DistCoeffs,
                            rect_params->LeftRot, rect_params->LeftProj, ImageSize, CV_16SC2,
                            rect_params->LeftRMAP[0], rect_params->LeftRMAP[1]);
    initUndistortRectifyMap(calib_params[1].CameraMatrix, calib_params[1].DistCoeffs,
                            rect_params->RightRot, rect_params->RightProj, ImageSize, CV_16SC2,
                            rect_params->RightRMAP[0], rect_params->RightRMAP[1]);
}
// Full stereo-calibration pipeline over an even-length list of image
// file names (left/right interleaved): detects chessboards (retrying at
// up to maxScale upscaling), stereo-calibrates the pair, reports the
// epipolar error, saves intrinsics/extrinsics to calib/*.yml, rectifies
// (Bouguet's method when useCalibrated, otherwise Hartley's) and
// optionally displays the rectified pairs with epipolar lines.
void StereoCalib(const vector<string>& imagelist, Size boardSize, bool useCalibrated=true, bool showRectified=true)
{
    if( imagelist.size() % 2 != 0 )
    {
        cout << "Error: the image list contains odd (non-even) number of elements\n";
        return;
    }
    printf("board size: %d x %d", boardSize.width, boardSize.height);
    bool displayCorners = true;
    const int maxScale = 2;
    const float squareSize = 1.f;  // Set this to your actual square size
    // ARRAY AND VECTOR STORAGE:
    vector<vector<Point2f> > imagePoints[2];
    vector<vector<Point3f> > objectPoints;
    Size imageSize;
    int i, j, k, nimages = (int)imagelist.size()/2;
    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    vector<string> goodImageList;
    // Detection pass: i indexes pairs, j counts pairs where BOTH images
    // yielded a board (k reaches 2 only on full success).
    for( i = j = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            const string& filename = imagelist[i*2+k];
            Mat img = imread(filename, 0);
            if(img.empty())
                break;
            if( imageSize == Size() )
                imageSize = img.size();
            else if( img.size() != imageSize )
            {
                cout << "The image " << filename << " has the size different from the first image size. Skipping the pair\n";
                break;
            }
            bool found = false;
            vector<Point2f>& corners = imagePoints[k][j];
            // Retry detection on an upscaled copy if the original fails;
            // detected corners are scaled back to original coordinates.
            for( int scale = 1; scale <= maxScale; scale++ )
            {
                Mat timg;
                if( scale == 1 )
                    timg = img;
                else
                    resize(img, timg, Size(), scale, scale);
                found = findChessboardCorners(timg, boardSize, corners,
                    CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_NORMALIZE_IMAGE);
                if( found )
                {
                    if( scale > 1 )
                    {
                        Mat cornersMat(corners);
                        cornersMat *= 1./scale;
                    }
                    break;
                }
            }
            if( displayCorners )
            {
                cout << filename << endl;
                Mat cimg, cimg1;
                cvtColor(img, cimg, CV_GRAY2BGR);
                drawChessboardCorners(cimg, boardSize, corners, found);
                double sf = 640./MAX(img.rows, img.cols);
                resize(cimg, cimg1, Size(), sf, sf);
                imshow("corners", cimg1);
                char c = (char)waitKey(500);
                if( c == 27 || c == 'q' || c == 'Q' ) //Allow ESC to quit
                    exit(-1);
            }
            else
                putchar('.');
            if( !found )
                break;
            cornerSubPix(img, corners, Size(11,11), Size(-1,-1),
                         TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 30, 0.01));
        }
        if( k == 2 )
        {
            goodImageList.push_back(imagelist[i*2]);
            goodImageList.push_back(imagelist[i*2+1]);
            j++;
        }
    }
    cout << j << " pairs have been successfully detected.\n";
    nimages = j;
    if( nimages < 2 )
    {
        cout << "Error: too little pairs to run the calibration\n";
        return;
    }
    imagePoints[0].resize(nimages);
    imagePoints[1].resize(nimages);
    objectPoints.resize(nimages);
    // Physical board coordinates (z = 0 plane) for every good pair.
    for( i = 0; i < nimages; i++ )
    {
        for( j = 0; j < boardSize.height; j++ )
            for( k = 0; k < boardSize.width; k++ )
                objectPoints[i].push_back(Point3f(j*squareSize, k*squareSize, 0));
    }
    cout << "Running stereo calibration ...\n";
    Mat cameraMatrix[2], distCoeffs[2];
    cameraMatrix[0] = Mat::eye(3, 3, CV_64F);
    cameraMatrix[1] = Mat::eye(3, 3, CV_64F);
    Mat R, T, E, F;
    double rms = stereoCalibrate(objectPoints, imagePoints[0], imagePoints[1],
                    cameraMatrix[0], distCoeffs[0],
                    cameraMatrix[1], distCoeffs[1],
                    imageSize, R, T, E, F,
                    TermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
                    CV_CALIB_FIX_ASPECT_RATIO +
                    CV_CALIB_ZERO_TANGENT_DIST +
                    //CV_CALIB_SAME_FOCAL_LENGTH +
                    CV_CALIB_RATIONAL_MODEL +
                    CV_CALIB_FIX_K3 + CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5);
    cout << "done with RMS error=" << rms << endl;
    // CALIBRATION QUALITY CHECK
    // because the output fundamental matrix implicitly
    // includes all the output information,
    // we can check the quality of calibration using the
    // epipolar geometry constraint: m2^t*F*m1=0
    double err = 0;
    int npoints = 0;
    vector<Vec3f> lines[2];
    for( i = 0; i < nimages; i++ )
    {
        int npt = (int)imagePoints[0][i].size();
        Mat imgpt[2];
        for( k = 0; k < 2; k++ )
        {
            imgpt[k] = Mat(imagePoints[k][i]);
            undistortPoints(imgpt[k], imgpt[k], cameraMatrix[k], distCoeffs[k], Mat(), cameraMatrix[k]);
            computeCorrespondEpilines(imgpt[k], k+1, F, lines[k]);
        }
        // Sum of point-to-epiline distances in both directions.
        for( j = 0; j < npt; j++ )
        {
            double errij = fabs(imagePoints[0][i][j].x*lines[1][j][0] +
                                imagePoints[0][i][j].y*lines[1][j][1] + lines[1][j][2]) +
                           fabs(imagePoints[1][i][j].x*lines[0][j][0] +
                                imagePoints[1][i][j].y*lines[0][j][1] + lines[0][j][2]);
            err += errij;
        }
        npoints += npt;
    }
    cout << "average reprojection err = " << err/npoints << endl;
    // save intrinsic parameters
    FileStorage fs("calib/intrinsics.yml", CV_STORAGE_WRITE);
    if( fs.isOpened() )
    {
        fs << "M1" << cameraMatrix[0] << "D1" << distCoeffs[0] <<
              "M2" << cameraMatrix[1] << "D2" << distCoeffs[1];
        fs.release();
    }
    else
        cout << "Error: can not save the intrinsic parameters\n";
    Mat R1, R2, P1, P2, Q;
    Rect validRoi[2];
    stereoRectify(cameraMatrix[0], distCoeffs[0],
                  cameraMatrix[1], distCoeffs[1],
                  imageSize, R, T, R1, R2, P1, P2, Q,
                  CALIB_ZERO_DISPARITY, 1, imageSize, &validRoi[0], &validRoi[1]);
    fs.open("calib/extrinsics.yml", CV_STORAGE_WRITE);
    if( fs.isOpened() )
    {
        fs << "R" << R << "T" << T << "R1" << R1 << "R2" << R2 << "P1" << P1 << "P2" << P2 << "Q" << Q;
        fs.release();
    }
    else
        cout << "Error: can not save the intrinsic parameters\n";
    // OpenCV can handle left-right
    // or up-down camera arrangements
    bool isVerticalStereo = fabs(P2.at<double>(1, 3)) > fabs(P2.at<double>(0, 3));
    // COMPUTE AND DISPLAY RECTIFICATION
    if( !showRectified )
        return;
    Mat rmap[2][2];
    // IF BY CALIBRATED (BOUGUET'S METHOD)
    if( useCalibrated )
    {
        // we already computed everything
    }
    // OR ELSE HARTLEY'S METHOD
    else
        // use intrinsic parameters of each camera, but
        // compute the rectification transformation directly
        // from the fundamental matrix
    {
        vector<Point2f> allimgpt[2];
        for( k = 0; k < 2; k++ )
        {
            for( i = 0; i < nimages; i++ )
                std::copy(imagePoints[k][i].begin(), imagePoints[k][i].end(), back_inserter(allimgpt[k]));
        }
        F = findFundamentalMat(Mat(allimgpt[0]), Mat(allimgpt[1]), FM_8POINT, 0, 0);
        Mat H1, H2;
        stereoRectifyUncalibrated(Mat(allimgpt[0]), Mat(allimgpt[1]), F, imageSize, H1, H2, 3);
        R1 = cameraMatrix[0].inv()*H1*cameraMatrix[0];
        R2 = cameraMatrix[1].inv()*H2*cameraMatrix[1];
        P1 = cameraMatrix[0];
        P2 = cameraMatrix[1];
    }
    //Precompute maps for cv::remap()
    initUndistortRectifyMap(cameraMatrix[0], distCoeffs[0], R1, P1, imageSize, CV_16SC2, rmap[0][0], rmap[0][1]);
    initUndistortRectifyMap(cameraMatrix[1], distCoeffs[1], R2, P2, imageSize, CV_16SC2, rmap[1][0], rmap[1][1]);
    // Side-by-side (or stacked) canvas for visual inspection.
    Mat canvas;
    double sf;
    int w, h;
    if( !isVerticalStereo )
    {
        sf = 600./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h, w*2, CV_8UC3);
    }
    else
    {
        sf = 300./MAX(imageSize.width, imageSize.height);
        w = cvRound(imageSize.width*sf);
        h = cvRound(imageSize.height*sf);
        canvas.create(h*2, w, CV_8UC3);
    }
    for( i = 0; i < nimages; i++ )
    {
        for( k = 0; k < 2; k++ )
        {
            Mat img = imread(goodImageList[i*2+k], 0), rimg, cimg;
            remap(img, rimg, rmap[k][0], rmap[k][1], CV_INTER_LINEAR);
            cvtColor(rimg, cimg, CV_GRAY2BGR);
            Mat canvasPart = !isVerticalStereo ? canvas(Rect(w*k, 0, w, h)) : canvas(Rect(0, h*k, w, h));
            resize(cimg, canvasPart, canvasPart.size(), 0, 0, CV_INTER_AREA);
            if( useCalibrated )
            {
                Rect vroi(cvRound(validRoi[k].x*sf), cvRound(validRoi[k].y*sf),
                          cvRound(validRoi[k].width*sf), cvRound(validRoi[k].height*sf));
                rectangle(canvasPart, vroi, Scalar(0,0,255), 3, 8);
            }
        }
        // Draw horizontal (or vertical) epipolar guide lines every 16 px.
        if( !isVerticalStereo )
            for( j = 0; j < canvas.rows; j += 16 )
                line(canvas, Point(0, j), Point(canvas.cols, j), Scalar(0, 255, 0), 1, 8);
        else
            for( j = 0; j < canvas.cols; j += 16 )
                line(canvas, Point(j, 0), Point(j, canvas.rows), Scalar(0, 255, 0), 1, 8);
        imshow("rectified", canvas);
        char c = (char)waitKey();
        if( c == 27 || c == 'q' || c == 'Q' )
            break;
    }
}
void Calibration::updateUndistortion() { Mat undistortedCameraMatrix = getOptimalNewCameraMatrix(distortedIntrinsics.getCameraMatrix(), distCoeffs, distortedIntrinsics.getImageSize(), fillFrame ? 0 : 1); initUndistortRectifyMap(distortedIntrinsics.getCameraMatrix(), distCoeffs, Mat(), undistortedCameraMatrix, distortedIntrinsics.getImageSize(), CV_16SC2, undistortMapX, undistortMapY); undistortedIntrinsics.setup(undistortedCameraMatrix, distortedIntrinsics.getImageSize()); }
///////////////////////////////////////////////////////
// Panel::CalibrateCamera() Description
///////////////////////////////////////////////////////
// Runs the interactive camera-calibration loop driven by the Settings
// file at sFilePath: detects the configured pattern in each input
// frame, calibrates once enough frames are collected, previews the
// result per image, and finally stores the undistortion maps in the
// panel's m_mainMap1/m_mainMap2 members for later use.
void Panel::CalibrateCamera(string sFilePath)
{
	help();
	//! [file_read]
	Settings s;
	const string inputSettingsFile = sFilePath;
	FileStorage fs(inputSettingsFile, FileStorage::READ); // Read the settings
	if (!fs.isOpened())
	{
		cout << "Could not open the configuration file: \"" << inputSettingsFile << "\"" << endl;
		// return -1;
	}
	fs["Settings"] >> s;
	fs.release();                                         // close Settings file
	//! [file_read]
	//FileStorage fout("settings.yml", FileStorage::WRITE); // write config as YAML
	//fout << "Settings" << s;
	if (!s.goodInput)
	{
		cout << "Invalid input detected. Application stopping. " << endl;
		// return -1;
	}
	vector<vector<Point2f> > imagePoints;
	Mat cameraMatrix, distCoeffs;
	Size imageSize;
	// Image lists start capturing immediately; live cameras wait for 'g'.
	int mode = s.inputType == Settings::IMAGE_LIST ? CAPTURING : DETECTION;
	clock_t prevTimestamp = 0;
	const Scalar RED(0, 0, 255), GREEN(0, 255, 0);
	const char ESC_KEY = 27;
	int counter = 1;   // numbers the per-image preview windows
	//! [get_input]
	for (;;)
	{
		Mat view;
		bool blinkOutput = false;
		view = s.nextImage();
		//----- If no more image, or got enough, then stop calibration and show result -------------
		if (mode == CAPTURING && imagePoints.size() >= (size_t)s.nrFrames)
		{
			if (runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints))
				mode = CALIBRATED;
			else
				mode = DETECTION;
		}
		if (view.empty())          // If there are no more images stop the loop
		{
			// if calibration threshold was not reached yet, calibrate now
			if (mode != CALIBRATED && !imagePoints.empty())
				runCalibrationAndSave(s, imageSize, cameraMatrix, distCoeffs, imagePoints);
			break;
		}
		//! [get_input]
		imageSize = view.size();  // Format input image.
		if (s.flipVertical)
			flip(view, view, 0);
		//! [find_pattern]
		vector<Point2f> pointBuf;
		bool found;
		switch (s.calibrationPattern) // Find feature points on the input format
		{
		case Settings::CHESSBOARD:
			found = findChessboardCorners(view, s.boardSize, pointBuf,
				CALIB_CB_ADAPTIVE_THRESH | CALIB_CB_FAST_CHECK | CALIB_CB_NORMALIZE_IMAGE);
			break;
		case Settings::CIRCLES_GRID:
			found = findCirclesGrid(view, s.boardSize, pointBuf);
			break;
		case Settings::ASYMMETRIC_CIRCLES_GRID:
			found = findCirclesGrid(view, s.boardSize, pointBuf, CALIB_CB_ASYMMETRIC_GRID);
			break;
		default:
			found = false;
			break;
		}
		//! [find_pattern]
		//! [pattern_found]
		if (found)                // If done with success,
		{
			// improve the found corners' coordinate accuracy for chessboard
			if (s.calibrationPattern == Settings::CHESSBOARD)
			{
				Mat viewGray;
				cvtColor(view, viewGray, COLOR_BGR2GRAY);
				cornerSubPix(viewGray, pointBuf, Size(11, 11), Size(-1, -1),
					TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
			}
			if (mode == CAPTURING &&  // For camera only take new samples after delay time
				(!s.inputCapture.isOpened() || clock() - prevTimestamp > s.delay*1e-3*CLOCKS_PER_SEC))
			{
				imagePoints.push_back(pointBuf);
				prevTimestamp = clock();
				blinkOutput = s.inputCapture.isOpened();
			}
			// Draw the corners.
			drawChessboardCorners(view, s.boardSize, Mat(pointBuf), found);
		}
		//! [pattern_found]
		//----------------------------- Output Text ------------------------------------------------
		//! [output_text]
		string msg = (mode == CAPTURING) ? "100/100" : mode == CALIBRATED ? "Calibrated" : "Press 'g' to start";
		int baseLine = 0;
		Size textSize = getTextSize(msg, 1, 1, 1, &baseLine);
		Point textOrigin(view.cols - 2 * textSize.width - 10, view.rows - 2 * baseLine - 10);
		if (mode == CAPTURING)
		{
			if (s.showUndistorsed)
				msg = format("%d/%d Undist", (int)imagePoints.size(), s.nrFrames);
			else
				msg = format("%d/%d", (int)imagePoints.size(), s.nrFrames);
		}
		putText(view, msg, textOrigin, 1, 1, mode == CALIBRATED ? GREEN : RED);
		if (blinkOutput)
			bitwise_not(view, view);   // flash to acknowledge a live capture
		//! [output_text]
		//------------------------- Video capture output undistorted ------------------------------
		//! [output_undistorted]
		if (mode == CALIBRATED && s.showUndistorsed)
		{
			Mat temp = view.clone();
			undistort(temp, view, cameraMatrix, distCoeffs);
		}
		//! [output_undistorted]
		//------------------------------ Show image and check for input commands -------------------
		//! [await_input]
		namedWindow("Image View" + to_string(counter), WINDOW_NORMAL);
		resizeWindow("Image View" + to_string(counter), 640, 480);
		imshow("Image View" + to_string(counter), view);
		char key = (char)waitKey(s.inputCapture.isOpened() ? 50 : s.delay);
		cout << "Image " << to_string(counter) << " Completed" << endl;
		counter++;
		if (key == ESC_KEY)
			break;
		if (key == 'u' && mode == CALIBRATED)
			s.showUndistorsed = !s.showUndistorsed;
		if (s.inputCapture.isOpened() && key == 'g')
		{
			mode = CAPTURING;
			imagePoints.clear();
		}
		//! [await_input]
	}
	// -----------------------Show the undistorted image for the image list ------------------------
	//! [show_results]
	if (s.inputType == Settings::IMAGE_LIST && s.showUndistorsed)
	{
		Mat view, rview, map1, map2;
		initUndistortRectifyMap(cameraMatrix, distCoeffs, Mat(),
			getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, imageSize, 1, imageSize, 0),
			imageSize, CV_16SC2, map1, map2);
		// Keep the maps in the panel so other code can undistort later.
		m_mainMap1 = map1;
		m_mainMap2 = map2;
		for (size_t i = 0; i < s.imageList.size(); i++)
		{
			view = imread(s.imageList[i], 1);
			if (view.empty())
				continue;
			remap(view, rview, map1, map2, INTER_LINEAR);
			imshow("Image View", rview);
			char c = (char)waitKey();
			if (c == ESC_KEY || c == 'q' || c == 'Q')
				break;
		}
	}
	//! [show_results]
	// return 0;
}
// Parses stereo-matching command-line options, loads the calibration given
// via -i/-e, builds the left/right rectification maps, and applies one of the
// preset parameter sets for the selected matcher (BM or SGBM).
//
// Returns 0 on success (also when argc < 3, in which case only the defaults
// set below are in effect), -1 on any parameter or file error.
int Depth_and_Disparity::stereo_match_and_disparity_init(int argc, char** argv, Size img_size)
{
	if(argc < 3)
	{
		return 0;
	}
	// ---- defaults (may be overridden by the options parsed below) ----
	//alg = STEREO_SGBM;
	alg = STEREO_BM;
	desired_param_set = 3; // called at the end of this function.
	minDisparityToCut = 35; // for the threshold cut
	//default here. runover later in ParamFunction.
	SADWindowSize = 0;
	numberOfDisparities = 0;
	no_display = false;
	scale = 1.f;
	target_image_size = img_size;
	last_disparity_depth= 0;

	// NOTE(review): parsing starts at argv[3] (i = 1+2) — the first two
	// arguments are presumably consumed by the caller; confirm.
	for( int i = 1+2; i < argc; i++ )
	{
		//if( argv[i][0] != '-' )
		//{
		//	/* if( !img1_filename )
		//		img1_filename = argv[i];
		//	else
		//		img2_filename = argv[i];*/
		//}
		//else
		// --algorithm=bm|sgbm|hh|var
		if( strncmp(argv[i], algorithm_opt, strlen(algorithm_opt)) == 0 )
		{
			char* _alg = argv[i] + strlen(algorithm_opt);
			alg = strcmp(_alg, "bm") == 0 ? STEREO_BM :
				strcmp(_alg, "sgbm") == 0 ? STEREO_SGBM :
				strcmp(_alg, "hh") == 0 ? STEREO_HH :
				strcmp(_alg, "var") == 0 ? STEREO_VAR : -1;
			if( alg < 0 )
			{
				printf("Command-line parameter error: Unknown stereo algorithm\n\n");
				//print_help();
				return -1;
			}
		}
		// --maxdisparity=<n>, n > 0 and divisible by 16 (OpenCV requirement)
		else if( strncmp(argv[i], maxdisp_opt, strlen(maxdisp_opt)) == 0 )
		{
			if( sscanf( argv[i] + strlen(maxdisp_opt), "%d", &numberOfDisparities ) != 1 ||
				numberOfDisparities < 1 || numberOfDisparities % 16 != 0 )
			{
				printf("Command-line parameter error: The max disparity (--maxdisparity=<...>) must be a positive integer divisible by 16\n");
				//print_help();
				return -1;
			}
		}
		// --blocksize=<n>, n positive and odd (SAD window)
		else if( strncmp(argv[i], blocksize_opt, strlen(blocksize_opt)) == 0 )
		{
			if( sscanf( argv[i] + strlen(blocksize_opt), "%d", &SADWindowSize ) != 1 ||
				SADWindowSize < 1 || SADWindowSize % 2 != 1 )
			{
				printf("Command-line parameter error: The block size (--blocksize=<...>) must be a positive odd number\n");
				return -1;
			}
		}
		// --scale=<f>, applied to the intrinsic matrices below
		else if( strncmp(argv[i], scale_opt, strlen(scale_opt)) == 0 )
		{
			if( sscanf( argv[i] + strlen(scale_opt), "%f", &scale ) != 1 || scale < 0 )
			{
				printf("Command-line parameter error: The scale factor (--scale=<...>) must be a positive floating-point number\n");
				return -1;
			}
		}
		else if( strcmp(argv[i], nodisplay_opt) == 0 )
			no_display = true;
		// NOTE(review): the four options below read argv[++i] without checking
		// i+1 < argc — a trailing "-i"/"-e"/"-o"/"-p" reads past the array.
		else if( strcmp(argv[i], "-i" ) == 0 )
			intrinsic_filename = argv[++i];
		else if( strcmp(argv[i], "-e" ) == 0 )
			extrinsic_filename = argv[++i];
		else if( strcmp(argv[i], "-o" ) == 0 )
			disparity_filename = argv[++i];
		else if( strcmp(argv[i], "-p" ) == 0 )
			point_cloud_filename = argv[++i];
		else
		{
			// Unknown options are reported but deliberately not fatal.
			printf("Command-line parameter error: unknown option %d %s\n", i, argv[i]);
			//return -1;
		}
	}

	// Intrinsics and extrinsics must be given together or not at all
	// (XOR on the two pointer-null tests).
	if( (intrinsic_filename != 0) ^ (extrinsic_filename != 0) )
	{
		printf("Command-line parameter error: either both intrinsic and extrinsic parameters must be specified, or none of them (when the stereo pair is already rectified)\n");
		return -1;
	}

	if( extrinsic_filename == 0 && point_cloud_filename )
	{
		printf("Command-line parameter error: extrinsic and intrinsic parameters must be specified to compute the point cloud\n");
		return -1;
	}

	if( intrinsic_filename )
	{
		// reading intrinsic parameters
		FileStorage fs(intrinsic_filename, FileStorage::READ);
		if(!fs.isOpened())
		{
			printf("Failed to open file %s\n", intrinsic_filename);
			return -1;
		}

		fs["M1"] >> M1;
		fs["D1"] >> D1;
		fs["M2"] >> M2;
		fs["D2"] >> D2;
		// Rescale the camera matrices to match the working resolution.
		M1 *= scale;
		M2 *= scale;
		fs.release();

		FileStorage fs2(extrinsic_filename, FileStorage::READ);
		///fs2.open(extrinsic_filename, FileStorage::READ);
		if(!fs2.isOpened())
		{
			printf("Failed to open file %s\n", extrinsic_filename);
			return -1;
		}
		fs2["R"] >> R;
		fs2["T"] >> T;
		fs2.release();

		/* initialize rectification mapping */
		/* when calibrated and saving matrices - i calibrated Left camera as img1 , Right camera as img2 */
		Size img_ORG_size = Size(320,240); // the images size when calibrated the cameras
		stereoRectify( M1, D1, M2, D2, img_ORG_size, R, T, R1, R2, P1, P2, Q, CALIB_ZERO_DISPARITY, -1, img_size, &roi1, &roi2 );
		// CV_16SC2 maps: fixed-point, the fastest remap representation.
		initUndistortRectifyMap(M1, D1, R1, P1, img_size, CV_16SC2, map11, map12);
		initUndistortRectifyMap(M2, D2, R2, P2, img_size, CV_16SC2, map21, map22);
	}

	//// set algorithm and parameters :
	if (alg == STEREO_SGBM)
	{
		if (desired_param_set==1)
			set_SGBM_params_options_1();
		else
			set_SGBM_params_options_2();
	}
	else if (alg == STEREO_BM)
	{
		switch (desired_param_set)
		{
		case 1:
			set_BM_params_options_1();
			break;
		case 2:
			set_BM_params_options_2();
			break;
		case 3:
			set_BM_params_options_3();
			break;
		default:
			break;
		}
	}
	else
		; //ERROR about alg type

	return 0;
}
bool CaliHelper::RectifyImages(Mat* CameraImages, int numberOfImages,string outputLocation) { CameraParams cameraL; CameraParams cameraR; cv::FileStorage fs("output.yaml", cv::FileStorage::READ); fs["Q"] >> cameraL.Q; fs ["C1"] >>cameraL.cameraMatrix; fs ["D1"] >>cameraL.distCoeffs; fs ["PRinv1"] >> cameraL.PRinv; fs ["P1"] >>cameraL.P; fs ["R1"]>> cameraL.R; fs ["C2"] >>cameraR.cameraMatrix; fs ["D2"]>>cameraR.distCoeffs; fs ["PRinv2"] >>cameraR.PRinv; fs ["P2"] >>cameraR.P; fs ["R2"] >> cameraR.R; fs.release(); // DO SOME RECTIFICATION cvNamedWindow("bal"); for(int i=0;i<numberOfImages*2;i++) { imshow("bal",CameraImages[i]); cvWaitKey(0); } cv::Mat imageIn, imageRectified; cv::Mat mapX, mapY; cv::Mat output; std::vector<CameraParams> mParams; mParams.emplace_back(cameraL); mParams.emplace_back(cameraR); int width = 800, height = 600; cv::Mat canvas(height, width*2, CV_8UC3); //number of cameras for (int j = 0; j < 2; j++){ CameraParams x = mParams[j]; initUndistortRectifyMap(x.cameraMatrix, x.distCoeffs, x.R, x.P, cv::Size(width, height), CV_32FC1, mapX, mapY); cout << "width,height = " << width << "," << height << endl; cout << "C1 " << x.cameraMatrix << endl; cout << "D1 " << x.distCoeffs << endl; cout << "P1 " << x.P << endl; //number of images for (int i = 0; i <numberOfImages; i++){ remap(CameraImages[i], imageRectified, mapX, mapY, CV_INTER_LANCZOS4); //sprintf(outputLocation.c_str(), "rectified/%d_%04d.png",j, i); //cv::imwrite(filename, imageRectified); //if (i == 9 && j == 0){ // for (int row = 0; row < height; row++) for (int col = 0; col < width; col++) for (int chan = 0; chan < 3; chan++) canvas.data[row*width*2*3 + col*3 + chan] = imageRectified.data[row*width*3+col*3 + chan]; //} else if (i == 9 && j == 1){ // for (int row = 0; row < height; row++) for (int col = 0; col < width; col++) for (int chan = 0; chan < 3; chan++) canvas.data[width*3 + row*width*2*3 + col*3 + chan] = imageRectified.data[row*width*3+col*3 + chan]; //} } } return false; }
bool CCapturador::CapturePatternsUndisorted(Mat& CameraMatrix,Mat& DistMatrix,int time) { m_vCaptures.clear(); VideoCapture cap(0); // open the default camera if (!cap.isOpened()) // check if we succeeded return -1; bool bMakeCapture = false; int nPatterns = 0; namedWindow("Camera", 1); namedWindow("Patrones"); /* HWND win_handle = FindWindow(0, L"Patrones"); if (!win_handle) { printf("Failed FindWindow\n"); } // Resize unsigned int flags = (SWP_SHOWWINDOW | SWP_NOSIZE | SWP_NOMOVE | SWP_NOZORDER); flags &= ~SWP_NOSIZE; unsigned int x = 0; unsigned int y = 0; unsigned int w = m_Options->m_nWidth; unsigned int h = m_Options->m_nHeight; SetWindowPos(win_handle, HWND_NOTOPMOST, x, y, w, h, flags); // Borderless SetWindowLong(win_handle, GWL_STYLE, GetWindowLong(win_handle, GWL_EXSTYLE) | WS_EX_TOPMOST); ShowWindow(win_handle, SW_SHOW); cvMoveWindow("Patrones", 0, 0); */ long A = getTickCount(); long B = getTickCount(); bool start = false; for (int i = 0;;) { imshow("Patrones", m_vPatterns[i]); Mat frame; cap >> frame; if (frame.empty()) return false; Mat view, rview, map1, map2; initUndistortRectifyMap(CameraMatrix, DistMatrix, Mat(), getOptimalNewCameraMatrix(CameraMatrix, DistMatrix, frame.size(), 1, frame.size(), 0), frame.size(), CV_16SC2, map1, map2); remap(frame, rview, map1, map2, INTER_LINEAR); imshow("Camera", rview); B = getTickCount(); int C = B - A; if ((C>time&&start) || waitKey(30) >= 0) { start = true; cout << "time = " << C << endl; A = getTickCount(); i++; Mat capture = frame.clone(); Mat gray; cv::cvtColor(capture, gray, CV_BGR2GRAY); m_vCaptures.push_back(gray); if (++nPatterns >= m_nPatterns) break; }; } cout << "Patrones capturados." << endl; cvDestroyWindow("Patrones"); return true; }