Example #1
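// Rebuilds the 3D view: a grid representing the checkerboard, plus one labeled coordinate
// frame per image with detected corners, placed at its reconstructed camera pose.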
void camera_calib_guiDialog::show3Dview()
{
	mrpt::opengl::COpenGLScenePtr	scene = mrpt::opengl::COpenGLScene::Create();

	const unsigned int  check_size_x = edSizeX->GetValue();
	const unsigned int  check_size_y = edSizeY->GetValue();
	const double        check_squares_length_X_meters = 0.001 * atof( string(edLengthX->GetValue().mb_str()).c_str() );
	const double        check_squares_length_Y_meters = 0.001 * atof( string(edLengthY->GetValue().mb_str()).c_str() );

	if (!check_squares_length_X_meters || !check_squares_length_Y_meters) return;

	opengl::CGridPlaneXYPtr	grid = opengl::CGridPlaneXY::Create(0,check_size_x*check_squares_length_X_meters, 0, check_size_y*check_squares_length_Y_meters, 0, check_squares_length_X_meters );
	scene->insert( grid );

	for (TCalibrationImageList::iterator it=lst_images.begin();it!=lst_images.end();++it)
	{
		if (!it->second.detected_corners.empty())
		{
			mrpt::opengl::CSetOfObjectsPtr	cor = mrpt::opengl::stock_objects::CornerXYZ();
			cor->setName( mrpt::system::extractFileName(it->first) );
			cor->enableShowName(true);
			cor->setScale(0.1);
			cor->setPose( it->second.reconstructed_camera_pose );

			scene->insert( cor );
		}
	}

	//scene->insert( mrpt::opengl::stock_objects::CornerXYZ() );

	this->m_3Dview->m_openGLScene = scene;
	this->m_3Dview->Refresh();

}
Example #2
// Update the listbox from lst_images
void camera_calib_guiDialog::updateListOfImages()
{
	lbFiles->Clear();
	for (TCalibrationImageList::iterator s=lst_images.begin();s!=lst_images.end();++s)
		lbFiles->Append(_U(s->first.c_str()));

	btnSaveImages->Enable( !lst_images.empty() );

	refreshDisplayedImage();
}
Example #3
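// Ask the user for a target directory and save every captured image there as a PNG file: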
void camera_calib_guiDialog::OnbtnSaveImagesClick(wxCommandEvent& event)
{
	try
	{
		if (lst_images.empty()) return;

		wxDirDialog  dlg(this,_("Select the directory where to save the images"),_("."));

		if (dlg.ShowModal()==wxID_OK)
		{
			string dir = string(dlg.GetPath().mb_str());

			for (TCalibrationImageList::iterator s=lst_images.begin();s!=lst_images.end();++s)
				s->second.img_original.saveToFile( dir+string("/")+s->first+string(".png") );
		}
	}
	catch(std::exception &e)
	{
		wxMessageBox(_U(e.what()),_("Error"),wxICON_INFORMATION,this);
	}
}
Example #4
// Shows the image selected in the listbox:
void camera_calib_guiDialog::refreshDisplayedImage()
{
	try
	{

	if (!lbFiles->GetCount())
	{
		// No images:
		return;
	}


	// Ensure one image is selected:
	if (lbFiles->GetSelection()==wxNOT_FOUND)
		lbFiles->SetSelection(0);

	const string selFile = string(lbFiles->GetStringSelection().mb_str());

	TCalibrationImageList::iterator it = lst_images.find(selFile);
	if (it==lst_images.end()) return;

	// Zoom: the combo box holds a percentage; convert it to a scale factor:
	const std::string strZoom = string(cbZoom->GetStringSelection().mb_str());
	const double zoomVal = 0.01*atof(strZoom.c_str());

	ASSERT_(zoomVal>0 && zoomVal<30)


	TImageSize  imgSizes(0,0);

	// Generate the images on-the-fly:
	CImage  imgOrgColor;
	it->second.img_original.colorImage(imgOrgColor);

	imgSizes = imgOrgColor.getSize();

	// Rectify:
	CImage  imgRect;
	if (camera_params.intrinsicParams(0,0)==0)
	{
		// Not calibrated yet:
		imgRect = imgOrgColor;
		imgRect.scaleImage(imgSizes.x*zoomVal,imgSizes.y*zoomVal, IMG_INTERP_NN);
	}
	else
	{
		imgOrgColor.rectifyImage(imgRect,camera_params);
		imgRect.scaleImage(imgSizes.x*zoomVal,imgSizes.y*zoomVal, IMG_INTERP_NN);

		// Draw reprojected:
		for (unsigned int k=0;k<it->second.projectedPoints_undistorted.size();k++)
			imgRect.drawCircle( zoomVal*it->second.projectedPoints_undistorted[k].x, zoomVal*it->second.projectedPoints_undistorted[k].y, 4, TColor(0,255,64) );

		imgRect.drawCircle( 10,10, 4, TColor(0,255,64) );
		imgRect.textOut(18,4,"Reprojected corners",TColor::white);
	}

	// Zoom images:
	imgOrgColor.scaleImage(imgSizes.x*zoomVal,imgSizes.y*zoomVal, IMG_INTERP_NN);

	imgSizes = imgOrgColor.getSize();

	CImage  imgCheck = imgOrgColor;

	// Draw the board:
	for (unsigned int k=0;k<it->second.detected_corners.size();k++)
	{
		imgCheck.cross(it->second.detected_corners[k].x *zoomVal, it->second.detected_corners[k].y *zoomVal, TColor::blue, '+', 3 );
		imgCheck.drawCircle( it->second.projectedPoints_distorted[k].x*zoomVal, it->second.projectedPoints_distorted[k].y*zoomVal, 4, TColor(0,255,64) );
	}
	imgCheck.drawCircle( 10,10, 4, TColor(0,255,64) );
	imgCheck.textOut(18,4,"Reprojected corners",TColor::white);

	imgCheck.cross( 10,30, TColor::blue, '+', 3 );
	imgCheck.textOut(18,24,"Detected corners",TColor::white);


	this->bmpOriginal->AssignImage( imgCheck );
	this->bmpRectified->AssignImage( imgRect );

	it->second.img_original.unload();


	// Plot:

	this->bmpOriginal->SetMinSize(wxSize(imgSizes.x,imgSizes.y));
	this->bmpOriginal->SetMaxSize(wxSize(imgSizes.x,imgSizes.y));
	this->bmpOriginal->SetSize(imgSizes.x,imgSizes.y);

	this->bmpRectified->SetMinSize(wxSize(imgSizes.x,imgSizes.y));
	this->bmpRectified->SetMaxSize(wxSize(imgSizes.x,imgSizes.y));
	this->bmpRectified->SetSize(imgSizes.x,imgSizes.y);

	this->FlexGridSizer11->RecalcSizes();
	this->FlexGridSizer14->RecalcSizes();

	//this->ScrolledWindow2->SetVirtualSize(wxSize(imgSizes.x,imgSizes.y));
	//this->ScrolledWindow3->SetVirtualSize(wxSize(imgSizes.x,imgSizes.y));
	this->ScrolledWindow2->SetScrollbars(1,1,imgSizes.x,imgSizes.y);
	this->ScrolledWindow3->SetScrollbars(1,1,imgSizes.x,imgSizes.y);

	this->bmpOriginal->Refresh(false);
	this->bmpRectified->Refresh(false);

	}
	catch(std::exception &e)
	{
		wxMessageBox(_U(e.what()),_("Error"),wxICON_INFORMATION,this);
	}
}
Example #5
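// Remove all images from the calibration list: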
void camera_calib_guiDialog::clearListImages()
{
	lst_images.clear();
}
Example #6
/* -------------------------------------------------------
				checkerBoardCameraCalibration
   ------------------------------------------------------- */
bool mrpt::vision::checkerBoardCameraCalibration(
	TCalibrationImageList &images,
	unsigned int  check_size_x,
	unsigned int  check_size_y,
	double        check_squares_length_X_meters,
	double        check_squares_length_Y_meters,
	mrpt::utils::TCamera    &out_camera_params,
	bool					normalize_image,
	double            *out_MSE,
	bool               skipDrawDetectedImgs,
	bool			   useScaramuzzaAlternativeDetector
	)
{
#if MRPT_HAS_OPENCV
	try
	{
		ASSERT_(check_size_x>2)
		ASSERT_(check_size_y>2)
		ASSERT_(check_squares_length_X_meters>0)
		ASSERT_(check_squares_length_Y_meters>0)

		if (images.size()<1)
		{
			std::cout << "ERROR: No input images." << std::endl;
			return false;
		}

		const unsigned CORNERS_COUNT = check_size_x * check_size_y;
		const CvSize check_size = cvSize(check_size_x, check_size_y);

		// First: Ensure all images are loaded:
		// -------------------------------------------
		TCalibrationImageList::iterator it;
		for (it=images.begin();it!=images.end();it++)
		{
			TImageCalibData	&dat = it->second;

			dat.projectedPoints_distorted.clear();  // Clear reprojected points.
			dat.projectedPoints_undistorted.clear();

			// Skip if images are marked as "externalStorage":
			if (!dat.img_original.isExternallyStored() && !mrpt::system::extractFileExtension(it->first).empty()  )
			{
				if (!dat.img_original.loadFromFile(it->first))
					THROW_EXCEPTION_CUSTOM_MSG1("Error reading image: %s",it->first.c_str());

				dat.img_checkboard = dat.img_original;
				dat.img_rectified  = dat.img_original;
			}
		}

		// For each image, find checkerboard corners:
		// -----------------------------------------------
		//const unsigned int N = images.size();
		unsigned int i;

		vector<CvPoint2D64f> corners_list; //  = new CvPoint2D32f[ N * CORNERS_COUNT];
		unsigned int  valid_detected_imgs = 0;

		CvSize	imgSize = cvSize(0,0);

		vector<string>   pointsIdx2imageFile;

		int find_chess_flags = CV_CALIB_CB_ADAPTIVE_THRESH;
		if (normalize_image)
			find_chess_flags |= CV_CALIB_CB_NORMALIZE_IMAGE;

		for (i=0,it=images.begin();it!=images.end();it++,i++)
		{
			TImageCalibData	&dat = it->second;

			// Make grayscale version:
			const CImage img_gray( dat.img_original, FAST_REF_OR_CONVERT_TO_GRAY );

			if (!i)
			{
				imgSize = cvSize(img_gray.getWidth(),img_gray.getHeight() );
				out_camera_params.ncols = imgSize.width;
				out_camera_params.nrows = imgSize.height;
			}
			else
			{
				if (imgSize.height != (int)img_gray.getHeight() || imgSize.width != (int)img_gray.getWidth())
				{
					std::cout << "ERROR: All the images must have the same size" << std::endl;
					return false;
				}
			}

			// Detection status for this image:
			unsigned corners_count = CORNERS_COUNT;
			bool corners_found = false;

			corners_list.resize( (1+valid_detected_imgs)*CORNERS_COUNT );

			dat.detected_corners.clear();

			// Do detection (this includes the "refine corners" with cvFindCornerSubPix):
			vector<TPixelCoordf> detectedCoords;
			corners_found = mrpt::vision::findChessboardCorners(
				img_gray,
				detectedCoords,
				check_size_x,check_size_y,
				normalize_image, // normalize_image
				useScaramuzzaAlternativeDetector
				);

			corners_count = detectedCoords.size();

			// Copy the data into the overall array of coords:
			ASSERT_(detectedCoords.size()<=CORNERS_COUNT);
			for (size_t p=0;p<detectedCoords.size();p++)
			{
				corners_list[valid_detected_imgs*CORNERS_COUNT+p].x = detectedCoords[p].x;
				corners_list[valid_detected_imgs*CORNERS_COUNT+p].y = detectedCoords[p].y;
			}

			if (corners_found && corners_count!=CORNERS_COUNT)
				corners_found = false;


			cout << format("Img %s: %s\n", mrpt::system::extractFileName(it->first).c_str() , corners_found ? "DETECTED" : "NOT DETECTED" );

			if( corners_found )
			{
				// save the corners in the data structure:
				int x, y;
				unsigned int k;
				for( y = 0, k = 0; y < check_size.height; y++ )
					for( x = 0; x < check_size.width; x++, k++ )
						dat.detected_corners.push_back( mrpt::utils::TPixelCoordf( corners_list[valid_detected_imgs*CORNERS_COUNT + k].x, corners_list[valid_detected_imgs*CORNERS_COUNT + k].y ) );

				// Draw the checkerboard in the corresponding image:
				// ----------------------------------------------------
				if ( !dat.img_original.isExternallyStored() )
				{
					const int r = 4;
					CvPoint prev_pt= cvPoint(0, 0);
					const int line_max = 8;
					CvScalar line_colors[8];

					line_colors[0] = CV_RGB(255,0,0);
					line_colors[1] = CV_RGB(255,128,0);
					line_colors[2] = CV_RGB(255,128,0);
					line_colors[3] = CV_RGB(200,200,0);
					line_colors[4] = CV_RGB(0,255,0);
					line_colors[5] = CV_RGB(0,200,200);
					line_colors[6] = CV_RGB(0,0,255);
					line_colors[7] = CV_RGB(255,0,255);

					// Checkerboard as color image:
					dat.img_original.colorImage( dat.img_checkboard );

					void *rgb_img = dat.img_checkboard.getAs<IplImage>();

					for( y = 0, k = 0; y < check_size.height; y++ )
					{
						CvScalar color = line_colors[y % line_max];
						for( x = 0; x < check_size.width; x++, k++ )
						{
							CvPoint pt;
							pt.x = cvRound(corners_list[valid_detected_imgs*CORNERS_COUNT + k].x);
							pt.y = cvRound(corners_list[valid_detected_imgs*CORNERS_COUNT + k].y);

							if( k != 0 ) cvLine( rgb_img, prev_pt, pt, color );

							cvLine( rgb_img,
									  cvPoint( pt.x - r, pt.y - r ),
									  cvPoint( pt.x + r, pt.y + r ), color );
							cvLine( rgb_img,
									  cvPoint( pt.x - r, pt.y + r),
									  cvPoint( pt.x + r, pt.y - r), color );
							cvCircle( rgb_img, pt, r+1, color );
							prev_pt = pt;
						}
					}
				}
			}

			if( corners_found )
			{
				pointsIdx2imageFile.push_back( it->first );
				valid_detected_imgs++;
			}

		} // end find corners

		std::cout << valid_detected_imgs << " valid images." << std::endl;
		if (!valid_detected_imgs)
		{
			std::cout << "ERROR: No valid images. Perhaps the checkerboard size is incorrect?" << std::endl;
			return false;
		}

		// ---------------------------------------------
		// Calculate the camera parameters
		// ---------------------------------------------
		// Was: FillEtalonObjPoints
		vector<CvPoint3D64f> obj_points( valid_detected_imgs * CORNERS_COUNT );

		{
			unsigned int y,k;
			for( y = 0, k = 0; y < check_size_y; y++ )
			{
				for( unsigned int x = 0; x < check_size_x; x++, k++ )
				{
					obj_points[k].x =-check_squares_length_X_meters * x;  // The "-" is for convenience, so the camera poses appear with Z>0
					obj_points[k].y = check_squares_length_Y_meters * y;
					obj_points[k].z = 0;
				}
			}
		}

		// Repeat the pattern N times:
		for( i= 1; i< valid_detected_imgs; i++ )
			memcpy( &obj_points[CORNERS_COUNT*i], &obj_points[0], CORNERS_COUNT*sizeof(obj_points[0]));

		// Number of detected points in each image (constant):
		vector<int> numsPoints(valid_detected_imgs, (int)CORNERS_COUNT );

		double proj_matrix[9];
		double distortion[4];

		vector<CvPoint3D64f>  transVects( valid_detected_imgs );
		vector<double>        rotMatrs( valid_detected_imgs * 9 );

		// Calibrate camera
		cvCalibrateCamera_64d(
			valid_detected_imgs,
			&numsPoints[0],
			imgSize,
			&corners_list[0],
			&obj_points[0],
			distortion,
			proj_matrix,
			(double*)&transVects[0],
			&rotMatrs[0],
			0 );

		// Load matrix:
		out_camera_params.intrinsicParams = CMatrixDouble33( proj_matrix );

		out_camera_params.dist.assign(0);
		for (int i=0;i<4;i++)
			out_camera_params.dist[i] = distortion[i];

		// Load camera poses:
		for (i=0;i<valid_detected_imgs;i++)
		{
			const double *R = &rotMatrs[9*i];

			CMatrixDouble HM(4,4);
			HM.zeros();
			HM(3,3)=1;

			HM(0,0)=R[0];
			HM(1,0)=R[3];
			HM(2,0)=R[6];

			HM(0,1)=R[1];
			HM(1,1)=R[4];
			HM(2,1)=R[7];

			HM(0,2)=R[2];
			HM(1,2)=R[5];
			HM(2,2)=R[8];

			HM(0,3)=transVects[i].x;
			HM(1,3)=transVects[i].y;
			HM(2,3)=transVects[i].z;

			CPose3D p = CPose3D(0,0,0) - CPose3D(HM);
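			// "CPose3D(0,0,0) - CPose3D(HM)" composes the inverse of HM, i.e. the camera pose
			// expressed in the checkerboard pattern's coordinate frame.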

			images[ pointsIdx2imageFile[i] ].reconstructed_camera_pose = p;

			std::cout << "Img: " <<  mrpt::system::extractFileName(pointsIdx2imageFile[i])  << ": " << p << std::endl;
		}

		{
			CConfigFileMemory cfg;
			out_camera_params.saveToConfigFile("CAMERA_PARAMS",cfg);
			std::cout << cfg.getContent() << std::endl;
		}

		// ----------------------------------------
		// Undistort images:
		// ----------------------------------------
		for (it=images.begin();it!=images.end();it++)
		{
			TImageCalibData	&dat = it->second;
			if (!dat.img_original.isExternallyStored())
				dat.img_original.rectifyImage( dat.img_rectified, out_camera_params);
		} // end undistort

		// -----------------------------------------------
		// Reproject points to measure the fit sqr error
		// -----------------------------------------------
		double sqrErr = 0;

		for (i=0;i<valid_detected_imgs;i++)
		{
			TImageCalibData  & dat = images[ pointsIdx2imageFile[i] ];
			if (dat.detected_corners.size()!=CORNERS_COUNT) continue;

			// Reproject all the points into pixel coordinates:
			// -----------------------------------------------------

			vector<TPoint3D>  lstPatternPoints(CORNERS_COUNT);	// Points as seen from the camera:
			for (unsigned int p=0;p<CORNERS_COUNT;p++)
				lstPatternPoints[p] = TPoint3D(obj_points[p].x,obj_points[p].y,obj_points[p].z);

			vector<TPixelCoordf>	&projectedPoints = dat.projectedPoints_undistorted;
			vector<TPixelCoordf>	&projectedPoints_distorted = dat.projectedPoints_distorted;

			vision::pinhole::projectPoints_no_distortion(
				lstPatternPoints, // Input points
				dat.reconstructed_camera_pose,
				out_camera_params.intrinsicParams, // calib matrix
				projectedPoints  // Output points in pixels
				);

			vision::pinhole::projectPoints_with_distortion(
				lstPatternPoints, // Input points
				dat.reconstructed_camera_pose,
				out_camera_params.intrinsicParams, // calib matrix
				out_camera_params.getDistortionParamsAsVector(),
				projectedPoints_distorted// Output points in pixels
				);

			ASSERT_(projectedPoints.size()==CORNERS_COUNT);
			ASSERT_(projectedPoints_distorted.size()==CORNERS_COUNT);


			for (unsigned int p=0;p<CORNERS_COUNT;p++)
			{
				const double px = projectedPoints[p].x;
				const double py = projectedPoints[p].y;

				const double px_d = projectedPoints_distorted[p].x;
				const double py_d = projectedPoints_distorted[p].y;

				// Only draw if the img is NOT external:
				if (!dat.img_original.isExternallyStored())
				{
					if( px >= 0 && px < imgSize.width && py >= 0 && py < imgSize.height )
						cvCircle( dat.img_rectified.getAs<IplImage>(), cvPoint(px,py), 4, CV_RGB(0,0,255) );
				}

				// Accumulate error:
				sqrErr+=square(px_d-dat.detected_corners[p].x)+square(py_d-dat.detected_corners[p].y); // Error relative to the original (distorted) image.
			}
		}

		if (valid_detected_imgs)
		{
			sqrErr /= CORNERS_COUNT*valid_detected_imgs;
			std::cout << "Average err. of reprojection: " << sqrt(sqrErr) << " pixels" << std::endl;
		}
		if(out_MSE) *out_MSE = sqrt(sqrErr);

		return true;
	}
	catch(std::exception &e)
	{
		std::cout << e.what() << std::endl;
		return false;
	}
#else
	THROW_EXCEPTION("Function not available: MRPT was compiled without OpenCV")
#endif
}
Example #7
/* -------------------------------------------------------
				checkerBoardCameraCalibration
   ------------------------------------------------------- */
bool mrpt::vision::checkerBoardCameraCalibration(
	TCalibrationImageList &images,
	unsigned int  check_size_x,
	unsigned int  check_size_y,
	double        check_squares_length_X_meters,
	double        check_squares_length_Y_meters,
	mrpt::utils::TCamera    &out_camera_params,
	bool					normalize_image,
	double            *out_MSE,
	bool               skipDrawDetectedImgs,
	bool			   useScaramuzzaAlternativeDetector
	)
{
	MRPT_UNUSED_PARAM(skipDrawDetectedImgs);
#if MRPT_HAS_OPENCV
	try
	{
		ASSERT_(check_size_x>2)
		ASSERT_(check_size_y>2)
		ASSERT_(check_squares_length_X_meters>0)
		ASSERT_(check_squares_length_Y_meters>0)

		if (images.size()<1)
		{
			std::cout << "ERROR: No input images." << std::endl;
			return false;
		}

		const unsigned CORNERS_COUNT = check_size_x * check_size_y;
		const CvSize check_size = cvSize(check_size_x, check_size_y);

		// Build the expected pattern object points once, outside the per-image loop:
		vector<cv::Point3f> pattern_obj_points(CORNERS_COUNT);
		{
			unsigned int y,k;
			for( y = 0, k = 0; y < check_size_y; y++ )
			{
				for( unsigned int x = 0; x < check_size_x; x++, k++ )
				{
					pattern_obj_points[k].x =-check_squares_length_X_meters * x;  // The "-" is for convenience, so the camera poses appear with Z>0
					pattern_obj_points[k].y = check_squares_length_Y_meters * y;
					pattern_obj_points[k].z = 0;
				}
			}
		}

		// First: Ensure all images are loaded:
		// -------------------------------------------
		TCalibrationImageList::iterator it;
		for (it=images.begin();it!=images.end();++it)
		{
			TImageCalibData	&dat = it->second;

			dat.projectedPoints_distorted.clear();  // Clear reprojected points.
			dat.projectedPoints_undistorted.clear();

			// Skip if images are marked as "externalStorage":
			if (!dat.img_original.isExternallyStored() && !mrpt::system::extractFileExtension(it->first).empty()  )
			{
				if (!dat.img_original.loadFromFile(it->first))
					THROW_EXCEPTION_CUSTOM_MSG1("Error reading image: %s",it->first.c_str());

				dat.img_checkboard = dat.img_original;
				dat.img_rectified  = dat.img_original;
			}
		}

		// For each image, find checkerboard corners:
		// -----------------------------------------------
		vector<vector<cv::Point3f> > objectPoints;  // Per-image 3D pattern points (one copy of pattern_obj_points per valid image)
		vector<vector<cv::Point2f> > imagePoints;   // Per-image detected 2D corners

		unsigned int  valid_detected_imgs = 0;
		vector<string>   pointsIdx2imageFile;
		cv::Size imgSize(0,0);

		unsigned int i;
		for (i=0,it=images.begin();it!=images.end();it++,i++)
		{
			TImageCalibData	&dat = it->second;

			// Make grayscale version:
			const CImage img_gray( dat.img_original, FAST_REF_OR_CONVERT_TO_GRAY );

			if (!i)
			{
				imgSize = cv::Size(img_gray.getWidth(),img_gray.getHeight() );
				out_camera_params.ncols = imgSize.width;
				out_camera_params.nrows = imgSize.height;
			}
			else
			{
				if (imgSize.height != (int)img_gray.getHeight() || imgSize.width != (int)img_gray.getWidth())
				{
					std::cout << "ERROR: All the images must have the same size" << std::endl;
					return false;
				}
			}

			// Detection status for this image:
			unsigned corners_count = CORNERS_COUNT;
			bool corners_found = false;

			vector<cv::Point2f> this_img_pts(CORNERS_COUNT);  // Temporary buffer for points, to be added if the points pass the checks.

			dat.detected_corners.clear();

			// Do detection (this includes the "refine corners" with cvFindCornerSubPix):
			vector<TPixelCoordf> detectedCoords;
			corners_found = mrpt::vision::findChessboardCorners(
				img_gray,
				detectedCoords,
				check_size_x,check_size_y,
				normalize_image, // normalize_image
				useScaramuzzaAlternativeDetector
				);

			corners_count = detectedCoords.size();

			// Copy the data into the overall array of coords:
			ASSERT_(detectedCoords.size()<=CORNERS_COUNT);
			for (size_t p=0;p<detectedCoords.size();p++)
			{
				this_img_pts[p].x = detectedCoords[p].x;
				this_img_pts[p].y = detectedCoords[p].y;
			}

			if (corners_found && corners_count!=CORNERS_COUNT)
				corners_found = false;

			cout << format("Img %s: %s\n", mrpt::system::extractFileName(it->first).c_str() , corners_found ? "DETECTED" : "NOT DETECTED" );

			if( corners_found )
			{
				// save the corners in the data structure:
				int x, y;
				unsigned int k;
				for( y = 0, k = 0; y < check_size.height; y++ )
					for( x = 0; x < check_size.width; x++, k++ )
						dat.detected_corners.push_back( mrpt::utils::TPixelCoordf( this_img_pts[k].x, this_img_pts[k].y ) );

				// Draw the checkerboard in the corresponding image:
				// ----------------------------------------------------
				if ( !dat.img_original.isExternallyStored() )
				{
					const int r = 4;
					CvPoint prev_pt= cvPoint(0, 0);
					const int line_max = 8;
					CvScalar line_colors[8];

					line_colors[0] = CV_RGB(255,0,0);
					line_colors[1] = CV_RGB(255,128,0);
					line_colors[2] = CV_RGB(255,128,0);
					line_colors[3] = CV_RGB(200,200,0);
					line_colors[4] = CV_RGB(0,255,0);
					line_colors[5] = CV_RGB(0,200,200);
					line_colors[6] = CV_RGB(0,0,255);
					line_colors[7] = CV_RGB(255,0,255);

					// Checkerboard as color image:
					dat.img_original.colorImage( dat.img_checkboard );

					void *rgb_img = dat.img_checkboard.getAs<IplImage>();

					for( y = 0, k = 0; y < check_size.height; y++ )
					{
						CvScalar color = line_colors[y % line_max];
						for( x = 0; x < check_size.width; x++, k++ )
						{
							CvPoint pt;
							pt.x = cvRound(this_img_pts[k].x);
							pt.y = cvRound(this_img_pts[k].y);

							if( k != 0 ) cvLine( rgb_img, prev_pt, pt, color );

							cvLine( rgb_img,
									  cvPoint( pt.x - r, pt.y - r ),
									  cvPoint( pt.x + r, pt.y + r ), color );
							cvLine( rgb_img,
									  cvPoint( pt.x - r, pt.y + r),
									  cvPoint( pt.x + r, pt.y - r), color );
							cvCircle( rgb_img, pt, r+1, color );
							prev_pt = pt;
						}
					}
				}

				// Accept this image as good:
				pointsIdx2imageFile.push_back( it->first );
				imagePoints.push_back( this_img_pts );
				objectPoints.push_back( pattern_obj_points );

				valid_detected_imgs++;
			}

		} // end find corners

		std::cout << valid_detected_imgs << " valid images." << std::endl;
		if (!valid_detected_imgs)
		{
			std::cout << "ERROR: No valid images. Perhaps the checkerboard size is incorrect?" << std::endl;
			return false;
		}

		// ---------------------------------------------
		// Calculate the camera parameters
		// ---------------------------------------------
		// Calibrate camera
		cv::Mat cameraMatrix, distCoeffs(1,5,CV_64F,cv::Scalar::all(0));
		vector<cv::Mat> rvecs, tvecs;

		const double cv_calib_err = cv::calibrateCamera(
			objectPoints,imagePoints,imgSize,
			cameraMatrix, distCoeffs, rvecs, tvecs,
			0 /*flags*/ );
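		// cv::calibrateCamera() returns the final RMS reprojection error, in pixels.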

		// Load matrix:
		out_camera_params.intrinsicParams = CMatrixDouble33( cameraMatrix.ptr<double>() );

		out_camera_params.dist.assign(0);
		for (int i=0;i<5;i++)
			out_camera_params.dist[i] = distCoeffs.ptr<double>()[i];

		// Load camera poses:
		for (i=0;i<valid_detected_imgs;i++)
		{
			CMatrixDouble44 HM;
			HM.zeros();
			HM(3,3)=1;

			{
				// Convert rotation vectors -> rot matrices:
				cv::Mat cv_rot;
				cv::Rodrigues(rvecs[i],cv_rot);

				Eigen::Matrix3d rot;
				cv::my_cv2eigen(cv_rot, rot );
				HM.block<3,3>(0,0) = rot;
			}

			{
				Eigen::Matrix<double,3,1> trans;
				cv::my_cv2eigen(tvecs[i], trans );
				HM.block<3,1>(0,3) = trans;
			}

			CPose3D p = CPose3D(0,0,0) - CPose3D(HM);
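			// "CPose3D(0,0,0) - CPose3D(HM)" composes the inverse of HM, i.e. the camera pose
			// expressed in the checkerboard pattern's coordinate frame.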

			images[ pointsIdx2imageFile[i] ].reconstructed_camera_pose = p;

			std::cout << "Img: " <<  mrpt::system::extractFileName(pointsIdx2imageFile[i])  << ": " << p << std::endl;
		}

		{
			CConfigFileMemory cfg;
			out_camera_params.saveToConfigFile("CAMERA_PARAMS",cfg);
			std::cout << cfg.getContent() << std::endl;
		}

		// ----------------------------------------
		// Undistort images:
		// ----------------------------------------
		for (it=images.begin();it!=images.end();++it)
		{
			TImageCalibData	&dat = it->second;
			if (!dat.img_original.isExternallyStored())
				dat.img_original.rectifyImage( dat.img_rectified, out_camera_params);
		} // end undistort

		// -----------------------------------------------
		// Reproject points to measure the fit sqr error
		// -----------------------------------------------
		double sqrErr = 0;

		for (i=0;i<valid_detected_imgs;i++)
		{
			TImageCalibData  & dat = images[ pointsIdx2imageFile[i] ];
			if (dat.detected_corners.size()!=CORNERS_COUNT) continue;

			// Reproject all the points into pixel coordinates:
			// -----------------------------------------------------
			vector<TPoint3D>  lstPatternPoints(CORNERS_COUNT);	// Points as seen from the camera:
			for (unsigned int p=0;p<CORNERS_COUNT;p++)
				lstPatternPoints[p] = TPoint3D(pattern_obj_points[p].x,pattern_obj_points[p].y,pattern_obj_points[p].z);

			vector<TPixelCoordf>	&projectedPoints = dat.projectedPoints_undistorted;
			vector<TPixelCoordf>	&projectedPoints_distorted = dat.projectedPoints_distorted;

			vision::pinhole::projectPoints_no_distortion(
				lstPatternPoints, // Input points
				dat.reconstructed_camera_pose,
				out_camera_params.intrinsicParams, // calib matrix
				projectedPoints  // Output points in pixels
				);

			vision::pinhole::projectPoints_with_distortion(
				lstPatternPoints, // Input points
				dat.reconstructed_camera_pose,
				out_camera_params.intrinsicParams, // calib matrix
				out_camera_params.getDistortionParamsAsVector(),
				projectedPoints_distorted// Output points in pixels
				);

			ASSERT_(projectedPoints.size()==CORNERS_COUNT);
			ASSERT_(projectedPoints_distorted.size()==CORNERS_COUNT);


			for (unsigned int p=0;p<CORNERS_COUNT;p++)
			{
				const double px = projectedPoints[p].x;
				const double py = projectedPoints[p].y;

				const double px_d = projectedPoints_distorted[p].x;
				const double py_d = projectedPoints_distorted[p].y;

				// Only draw if the img is NOT external:
				if (!dat.img_original.isExternallyStored())
				{
					if( px >= 0 && px < imgSize.width && py >= 0 && py < imgSize.height )
						cvCircle( dat.img_rectified.getAs<IplImage>(), cvPoint(px,py), 4, CV_RGB(0,0,255) );
				}

				// Accumulate error:
				sqrErr+=square(px_d-dat.detected_corners[p].x)+square(py_d-dat.detected_corners[p].y); // Error relative to the original (distorted) image.
			}
		}

		if (valid_detected_imgs)
		{
			sqrErr /= CORNERS_COUNT*valid_detected_imgs;
			std::cout << "Average err. of reprojection: " << sqrt(sqrErr) << " pixels (OpenCV error=" << cv_calib_err << ")\n";
		}
		if(out_MSE) *out_MSE = sqrt(sqrErr);

		return true;
	}
	catch(std::exception &e)
	{
		std::cout << e.what() << std::endl;
		return false;
	}
#else
	THROW_EXCEPTION("Function not available: MRPT was compiled without OpenCV")
#endif
}
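
A minimal usage sketch for checkerBoardCameraCalibration() follows. The file names and board
dimensions are hypothetical placeholders; only the call order and types follow the declaration above:

// Hypothetical usage sketch: calibrate from a set of checkerboard images on disk.
void example_calibrate_from_files()
{
	mrpt::vision::TCalibrationImageList images;
	images["frame_000.jpg"];  // Entries are keyed by image file path;
	images["frame_001.jpg"];  // the calibration call loads and fills each TImageCalibData.

	mrpt::utils::TCamera cam;
	double mse = 0;
	const bool ok = mrpt::vision::checkerBoardCameraCalibration(
		images,
		9, 6,           // inner corners along X and Y (assumed board layout)
		0.034, 0.034,   // checker square size, in meters (assumed)
		cam,            // output: intrinsics + distortion coefficients
		true,           // normalize_image
		&mse,           // out_MSE: RMS reprojection error, in pixels
		false,          // skipDrawDetectedImgs
		false );        // useScaramuzzaAlternativeDetector
	if (ok)
		std::cout << "RMS reprojection error: " << mse << " px" << std::endl;
}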