void LeapToCameraCalibrator::correctCameraPNP (ofxCv::Calibration & myCalibration){
	// Solve for the camera extrinsics (rotation + translation) that map the
	// Leap world-space calibration points onto their observed image positions,
	// reusing the intrinsics already stored in `myCalibration`.

    vector<cv::Point2f> imagePoints;
	vector<cv::Point3f> worldPoints;

	if( !hasFingerCalibPoints ){
		cout << "not enough control points" << endl;
		return;
	}

	for (size_t i = 0; i < calibVectorImage.size(); i++){
		imagePoints.push_back(ofxCvMin::toCv(calibVectorImage[i]));
		worldPoints.push_back(ofxCvMin::toCv(calibVectorWorld[i]));
	}

	// cv::solvePnP needs at least 4 point correspondences; with fewer it
	// fails at runtime instead of returning gracefully, so guard here.
	if( imagePoints.size() < 4 ){
		cout << "not enough control points" << endl;
		return;
	}

    cout << "myCalibration.getDistCoeffs() " << myCalibration.getDistCoeffs() << endl;
    cout << "myCalibration.getUndistortedIntrinsics().getCameraMatrix() " << myCalibration.getUndistortedIntrinsics().getCameraMatrix() << endl;

    cv::Mat rvec(3,1,cv::DataType<double>::type);
    cv::Mat tvec(3,1,cv::DataType<double>::type);

	// solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs,
	//          rvec, tvec, useExtrinsicGuess)
    cv::solvePnP(worldPoints, imagePoints,
				 myCalibration.getUndistortedIntrinsics().getCameraMatrix(),
				 myCalibration.getDistCoeffs(),
				 rvec, tvec, false );

    calibrated = true;

	setExtrinsics(rvec, tvec);
	setIntrinsics(myCalibration.getUndistortedIntrinsics().getCameraMatrix());

	this->camera = myCalibration.getUndistortedIntrinsics().getCameraMatrix();

	// The undistorted intrinsics carry no lens distortion, so store zeros.
	this->distortion = cv::Mat::zeros(5, 1, CV_64F);
    this->rotation = rvec;
	this->translation = tvec;

	cout << "camera:\n";
	cout << this->camera << "\n";
	cout << "RVEC:\n";
	cout << rvec << "\n";
	cout << "TVEC:\n";
	cout << tvec << "\n";
	cout << "--------\n";
}
void LeapToCameraCalibrator::correctCamera(){

	//we have to intitialise a basic camera matrix for it to start with (this will get changed by the function call calibrateCamera
	
    vector<cv::Point2f> imagePoints;
	vector<cv::Point3f> worldPoints;
    
	if( hasFingerCalibPoints ){
		for(int i = 0; i < calibVectorImage.size(); i++){
			imagePoints.push_back(ofxCvMin::toCv(calibVectorImage[i]));
			worldPoints.push_back(ofxCvMin::toCv(calibVectorWorld[i]));
		}
	}
	else{
		cout << "not enough control points" << endl;
		return;
	}
    
    
	//initialise matrices
	cv::Mat cameraMatrix = cv::Mat::eye(3, 3, CV_64F);
	cameraMatrix.at<double>(0,0) = resolution.x * 1.62f; // default at 1.4 : 1.0f throw ratio
	cameraMatrix.at<double>(1,1) = resolution.y * 1.62f;
	cameraMatrix.at<double>(0,2) = resolution.x / 2.0f;
	cameraMatrix.at<double>(1,2) = resolution.y * 0.90f; // default at 40% lens offset
	cameraMatrix.at<double>(2,2) = 1;
	cv::Mat distortionCoefficients = cv::Mat::zeros(5, 1, CV_64F);
    
	vector<cv::Mat> rotations, translations;
    
	int flags = CV_CALIB_FIX_K1 | CV_CALIB_FIX_K2 | CV_CALIB_FIX_K3 | CV_CALIB_FIX_K4 | CV_CALIB_FIX_K5 | CV_CALIB_FIX_K6 |CV_CALIB_ZERO_TANGENT_DIST | CV_CALIB_USE_INTRINSIC_GUESS;

    
	float error = cv::calibrateCamera(vector<vector<cv::Point3f> >(1, worldPoints),
                                vector<vector<cv::Point2f> >(1, imagePoints),
                                cv::Size(resolution.x, resolution.y),
                                cameraMatrix,
                                distortionCoefficients,
                                rotations,
                                translations,
                                flags);
    
    cout << " cameraMatrix " << cameraMatrix << endl;
    
	calibrated = true;

	setExtrinsics(rotations[0], translations[0]);
	setIntrinsics(cameraMatrix);
    
	this->camera = cameraMatrix;
	this->distortion = distortionCoefficients;
	this->rotation = rotations[0];
	this->translation = translations[0];
     
}
// ---- Example #3 (original marker: "예제 #3", followed by a stray "0") ----
// 0
// Record the intrinsics received from the DX archiver, then notify the site
// archiver manager that this proxy has fresh intrinsics available.
void DxArchiverProxyInternal::sendIntrinsics(DxArchiverIntrinsics *intrinsics)
{
   // Store a copy of the incoming intrinsics on this proxy.
   DxArchiverIntrinsics & received = *intrinsics;
   setIntrinsics(received);

   // Forward the notification to the manager on behalf of our proxy.
   siteDxArchiverManager_->receiveIntrinsics(proxy_);
}