예제 #1
0
void
camera_control_read_calibration(CameraControl* cc,
        char* intrinsicsFile, char* distortionFile)
{
    /* Load the camera intrinsic matrix and lens distortion coefficients
     * from the given OpenCV storage files; cvLoad returns NULL on failure. */
    CvMat *intrinsic = (CvMat*) cvLoad(intrinsicsFile, 0, 0, 0);
    CvMat *distortion = (CvMat*) cvLoad(distortionFile, 0, 0, 0);

    /* Drop any previously built undistortion maps before rebuilding. */
    if (cc->mapx) {
        cvReleaseImage(&cc->mapx);
    }
    if (cc->mapy) {
        cvReleaseImage(&cc->mapy);
    }

    if (intrinsic && distortion) {
        if (!cc->frame3chUndistort) {
            /* Grab one frame so there is an image of the right format to
             * hold undistorted output later. */
            enum PSMove_Bool new_frame;
            cc->frame3chUndistort = cvCloneImage(
                    camera_control_query_frame(cc, NULL, NULL, &new_frame));
        }
        int width, height;
        get_metrics(&width, &height);
        /* 32-bit float single-channel maps consumed by cvRemap(). */
        cc->mapx = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
        cc->mapy = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
        cvInitUndistortMap(intrinsic, distortion, cc->mapx, cc->mapy);
        /* Cache focal lengths fx (0,0) and fy (1,1) from the intrinsic
         * matrix for later use. */
        cc->focl_x = CV_MAT_ELEM(*intrinsic, float, 0, 0);
        cc->focl_y = CV_MAT_ELEM(*intrinsic, float, 1, 1);

        // TODO: Shouldn't we free intrinsic and distortion here?
    } else {
예제 #2
0
/**
 * Load lens calibration (intrinsic matrix + distortion coefficients)
 * from the given OpenCV XML/YAML files and rebuild the undistortion
 * maps (cc->mapx / cc->mapy) used by cvRemap().
 *
 * If either file fails to load, a warning is printed and no maps are
 * created.
 */
void
camera_control_read_calibration(CameraControl* cc,
        char* intrinsicsFile, char* distortionFile)
{
    CvMat *intrinsic = (CvMat*) cvLoad(intrinsicsFile, 0, 0, 0);
    CvMat *distortion = (CvMat*) cvLoad(distortionFile, 0, 0, 0);

    /* Drop any previously built undistortion maps before rebuilding. */
    if (cc->mapx) {
        cvReleaseImage(&cc->mapx);
    }
    if (cc->mapy) {
        cvReleaseImage(&cc->mapy);
    }

    if (intrinsic && distortion) {
        if (!cc->frame3chUndistort) {
            /* Grab one frame so there is an image of the right format
             * to hold undistorted output later. */
            cc->frame3chUndistort = cvCloneImage(
                    camera_control_query_frame(cc));
        }

        int width, height;
        get_metrics(&width, &height);

        cc->mapx = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);
        cc->mapy = cvCreateImage(cvSize(width, height), IPL_DEPTH_32F, 1);

        cvInitUndistortMap(intrinsic, distortion, cc->mapx, cc->mapy);
    } else {
        fprintf(stderr, "Warning: No lens calibration files found.\n");
    }

    /* cvLoad() allocates these matrices; once the maps are built they are
     * no longer needed (resolves the old TODO about freeing them).
     * Release whichever of the two actually loaded. */
    if (intrinsic) {
        cvReleaseMat(&intrinsic);
    }
    if (distortion) {
        cvReleaseMat(&distortion);
    }
}
예제 #3
0
// инициализация данных для устранения дисторсии
void BaseVideoCapture::initUndistortion(const char* _intrinsics, const char* _distortion)
{
	if(!frame)
		return;

	if(!_intrinsics || !_distortion)
	{
		printf("[!][BaseVideoCapture] Error: empty filename!\n");
		return;
	}
	
	//
	// загрузка из файлов 
	//
	// калибровочные коэффициенты
	intrinsic = (CvMat*)cvLoad( _intrinsics );
	// коэффициенты дисторсии
	distortion = (CvMat*)cvLoad( _distortion );

	mapx = cvCreateImage( cvGetSize( frame ), IPL_DEPTH_32F, 1 );
	mapy = cvCreateImage( cvGetSize( frame ), IPL_DEPTH_32F, 1 );
	//
	// создание карты для устранения дисторсии
	cvInitUndistortMap( intrinsic, distortion, mapx, mapy );

	// картинка для сохранения исправленной картинки
	undist = cvCloneImage(frame);
}
예제 #4
0
		// Build and cache the per-image undistortion maps (MapX/MapY) from
		// the image's linear (fx, fy, cx, cy) and nonlinear (k1, k2, p1, p2)
		// calibration parameters.
		void init( const SP_Image& i ) {

			CvSize imageSize = cvSize( i->width, i->height );
			IplImage *MapX = cvCreateImage( imageSize, IPL_DEPTH_32F, 1);
			IplImage *MapY = cvCreateImage( imageSize, IPL_DEPTH_32F, 1);

			// 3x3 intrinsic matrix [fx 0 cx; 0 fy cy; 0 0 1].
			// The aggregate initializer zeroes all elements, so the old
			// explicit clearing loop was redundant and has been removed.
			float kk[9]={0};
			kk[0] = i->intrinsicLinearCalibration[0];	// fx
			kk[2] =	i->intrinsicLinearCalibration[2];	// cx
			kk[4] =	i->intrinsicLinearCalibration[1];	// fy
			kk[5] =	i->intrinsicLinearCalibration[3];	// cy
			kk[8] = 1.;
			CvMat cvK = cvMat( 3, 3, CV_32FC1, kk);

			// 5-element distortion vector; the 5th coefficient is unused.
			float kk_c[5];
			for(int x=0; x<4; x++) kk_c[x]= i->intrinsicNonlinearCalibration[x];
			kk_c[4] = 0;
			CvMat dist = cvMat( 5, 1, CV_32FC1, kk_c );

		#ifdef HAVE_CV_UNDISTORT_RECTIFY_MAP
				// NULL rectification matrix means identity rotation, so the
				// previously declared (and unused) explicit identity matrix
				// has been removed.
				cvInitUndistortRectifyMap(&cvK, &dist, NULL, &cvK, MapX, MapY);
		#else
				cvInitUndistortMap(&cvK, &dist, MapX, MapY);
		#endif

			distortionMaps[ i ] = make_pair( MapX, MapY );
		}
예제 #5
0
// Precompute the camera matrix, distortion coefficient vector and the
// undistortion remap tables used by the "fast" undistortion path.
//
// NOTE(review): repeated calls re-create all four matrices without
// releasing the previous ones -- potential leak; verify the members are
// released elsewhere (e.g. the destructor) before relying on this.
void mitk::UndistortCameraImage::SetUndistortImageFastInfo(float in_dF1, float in_dF2,
                                                 float in_dPrincipalX, float in_dPrincipalY,
                                                 float in_Dist[4], float ImageSizeX, float ImageSizeY)
{
  //create new matrix
  m_DistortionCoeffs  = cvCreateMat(4, 1, CV_64FC1);
  m_CameraMatrix      = cvCreateMat(3, 3, CV_64FC1);


  //set the camera matrix [fx 0 cx; 0 fy cy; 0 0 1].
  cvSetReal2D(m_CameraMatrix, 0, 0, in_dF1);
  cvSetReal2D(m_CameraMatrix, 0, 1, 0.0);
  cvSetReal2D(m_CameraMatrix, 0, 2, in_dPrincipalX);

  cvSetReal2D(m_CameraMatrix, 1, 0, 0.0);
  cvSetReal2D(m_CameraMatrix, 1, 1, in_dF2);
  cvSetReal2D(m_CameraMatrix, 1, 2, in_dPrincipalY);

  cvSetReal2D(m_CameraMatrix, 2, 0, 0.0);
  cvSetReal2D(m_CameraMatrix, 2, 1, 0.0);
  cvSetReal2D(m_CameraMatrix, 2, 2, 1.0);

  //set distortions coefficients
  cvSetReal1D(m_DistortionCoeffs, 0, in_Dist[0]);
  cvSetReal1D(m_DistortionCoeffs, 1, in_Dist[1]);
  cvSetReal1D(m_DistortionCoeffs, 2, in_Dist[2]);
  cvSetReal1D(m_DistortionCoeffs, 3, in_Dist[3]);

  // NOTE(review): ImageSizeX/ImageSizeY are floats implicitly truncated
  // to int by cvCreateMat -- confirm callers always pass whole pixel
  // counts.
  m_mapX = cvCreateMat(ImageSizeY, ImageSizeX, CV_32FC1);
  m_mapY = cvCreateMat(ImageSizeY, ImageSizeX, CV_32FC1);

  //cv::initUndistortRectifyMap(m_CameraMatrix, m_DistortionCoeffs, m_mapX, m_mapY);
  cvInitUndistortMap(m_CameraMatrix, m_DistortionCoeffs, m_mapX, m_mapY);
}
예제 #6
0
// Run the actual camera calibration from the collected chessboard points,
// persist the results to disk, and rebuild the undistortion maps.
void CamCalib::CalibrateCamera(CvSize &image_size)
{
	// Throw away the results of any earlier calibration run.
	if (_intrinsic_matrix)  cvReleaseMat(&_intrinsic_matrix);
	if (_distortion_coeffs) cvReleaseMat(&_distortion_coeffs);
	if (_mapx) cvReleaseImage(&_mapx);
	if (_mapy) cvReleaseImage(&_mapy);

	// Fresh output matrices for cvCalibrateCamera2().
	_intrinsic_matrix  = cvCreateMat(3, 3, CV_32FC1);
	_distortion_coeffs = cvCreateMat(4, 1, CV_32FC1);

	// Seed the intrinsic matrix with a focal-length aspect ratio of 1.0.
	CV_MAT_ELEM( *_intrinsic_matrix, float, 0, 0 ) = 1.0f;
	CV_MAT_ELEM( *_intrinsic_matrix, float, 1, 1 ) = 1.0f;

	// Estimate the intrinsic matrix and distortion coefficients.
	cvCalibrateCamera2 (_object_points, _image_points, _point_counts, image_size, _intrinsic_matrix, _distortion_coeffs, NULL, NULL, 0);

	// Persist the results so they can be reloaded later.
	cvSave("Intrinsics.xml", _intrinsic_matrix);
	cvSave("Distortion.xml", _distortion_coeffs);

	// Create and fill the undistortion lookup maps for cvRemap().
	_mapx = cvCreateImage( image_size, IPL_DEPTH_32F, 1 );
	_mapy = cvCreateImage( image_size, IPL_DEPTH_32F, 1 );
	cvInitUndistortMap (_intrinsic_matrix, _distortion_coeffs, _mapx, _mapy);
}
예제 #7
0
// Refresh the radial lens parameters (focal length, principal point,
// distortion terms) from the intrinsic/distortion variables and rebuild
// the OpenCV undistortion maps when anything changed or the image size
// differs from the cached maps.
void UCamRad::updateLensParams(const int height, const int width)
{
  double f, cx, cy, k1, k2, s;
  CvSize imageSize;
  bool redo;
  UMatrix4 intrinsic, distortion;
  //
  if (varIntrinsic != NULL)
  {
    // Average the two focal-length entries of the intrinsic matrix.
    // NOTE(review): mixes getValued(0) with getDouble(4) -- presumably
    // both read the same underlying array; confirm.
    f = ((varIntrinsic->getValued(0) + varIntrinsic->getDouble(4))/2.0);
    cx = varIntrinsic->getDouble(2);
    cy = varIntrinsic->getDouble(5);
    // NOTE(review): both k1 and k2 read distortion element 0 (only the
    // divisor differs); k2 would normally come from element 1 -- verify
    // this is intentional.
    k1 = varDistortion->getDouble(0)/f;
    k2 = varDistortion->getDouble(0)/(f*f*f);
    s = getDev()->getPixelSize();
    // Rebuild only if any parameter actually changed.
    redo = (par.getK1() != k1 or
             par.getK2() != k2 or
             par.getFocalLength() != f or
             par.getHx() != cx or
             par.getHy() != cy);
    if (redo)
      par.setCameraParameters(cx, cy, k1, k2, f, s);
    //
    //
    imageSize = cvSize(width, height);
    if (imageSize.width > 0 and imageSize.height > 0)
    { // and image size is available, so continue
      if (mapx == NULL)
      { // first call: allocate the remap tables
        mapx = cvCreateImage(imageSize, IPL_DEPTH_32F, 1);
        mapy = cvCreateImage(imageSize, IPL_DEPTH_32F, 1);
        redo = true;
      }
      else if (imageSize.width != mapx->width)
      { // image size changed: reallocate the remap tables
        cvReleaseImage(&mapx);
        cvReleaseImage(&mapy);
        mapx = cvCreateImage(imageSize, IPL_DEPTH_32F, 1);
        mapy = cvCreateImage(imageSize, IPL_DEPTH_32F, 1);
        redo = true;
      }
      if (redo)
      { // wrap the raw parameter arrays as OpenCV matrices and rebuild
        intrinsic.setMat(3, 3, varIntrinsic->getValuesd());
        distortion.setMat(5, 1, varDistortion->getValuesd());
#if (CV_MAJOR_VERSION >= 1)
        cvInitUndistortMap( intrinsic.cvMat(), distortion.cvMat(), mapx, mapy );
#else
        printf("UCamRad::updateLensParams: used cvInitUndistortMap(...) is incompatible with openCV version\n");
#endif
      }
    }
  }
}
// --------------------------------------------------------------------------
// main(Number of arguments, Argument values)
// Description  : This is the entry point of the program.
//                Streams the AR.Drone camera, undistorting each frame with
//                the calibration stored in camera.xml.
// Return value : SUCCESS:0  ERROR:-1
// --------------------------------------------------------------------------
int main(int argc, char **argv)
{
    // AR.Drone class
    ARDrone ardrone;

    // Initialize
    if (!ardrone.open()) {
        printf("Failed to initialize.\n");
        return -1;
    }

    // Image of AR.Drone's camera
    IplImage *image = ardrone.getImage();

    // Read intrinsic camera parameters.
    // cvOpenFileStorage()/cvRead() return NULL on failure and would make
    // cvGetFileNodeByName()/cvInitUndistortMap() crash, so validate first.
    CvFileStorage *fs = cvOpenFileStorage("camera.xml", 0, CV_STORAGE_READ);
    if (!fs) {
        printf("Failed to open camera.xml.\n");
        ardrone.close();
        return -1;
    }
    CvMat *intrinsic = (CvMat*)cvRead(fs, cvGetFileNodeByName(fs, NULL, "intrinsic"));
    CvMat *distortion = (CvMat*)cvRead(fs, cvGetFileNodeByName(fs, NULL, "distortion"));
    if (!intrinsic || !distortion) {
        printf("Failed to read calibration matrices from camera.xml.\n");
        if (intrinsic) cvReleaseMat(&intrinsic);
        if (distortion) cvReleaseMat(&distortion);
        cvReleaseFileStorage(&fs);
        ardrone.close();
        return -1;
    }

    // Initialize undistortion maps
    CvMat *mapx = cvCreateMat(image->height, image->width, CV_32FC1);
    CvMat *mapy = cvCreateMat(image->height, image->width, CV_32FC1);
    cvInitUndistortMap(intrinsic, distortion, mapx, mapy);

    // Main loop
    while (1) {
        // Key input
        int key = cvWaitKey(1);
        if (key == 0x1b) break;

        // Update
        if (!ardrone.update()) break;

        // Get an image (owned by the ARDrone object -- do not release)
        image = ardrone.getImage();

        // Remap the image
        cvRemap(image, image, mapx, mapy);

        // Display the image
        cvShowImage("camera", image);
    }

    // Release the matrices (cvRead() allocated intrinsic/distortion,
    // which were previously leaked)
    cvReleaseMat(&intrinsic);
    cvReleaseMat(&distortion);
    cvReleaseMat(&mapx);
    cvReleaseMat(&mapy);
    cvReleaseFileStorage(&fs);

    // See you
    ardrone.close();

    return 0;
}
예제 #9
0
// Load a previously saved calibration (Intrinsics.xml / Distortion.xml)
// and rebuild the undistortion maps. On success, marks calibration as
// complete by pushing _successes past _n_boards.
void CamCalib::LoadCalibParams (CvSize &image_size)
{
	// Release anything left over from a previous calibration or load so
	// that reloading does not leak (mirrors CalibrateCamera()).
	if (_intrinsic_matrix)  cvReleaseMat(&_intrinsic_matrix);
	if (_distortion_coeffs) cvReleaseMat(&_distortion_coeffs);
	if (_mapx) cvReleaseImage(&_mapx);
	if (_mapy) cvReleaseImage(&_mapy);

	// Load the intrinsic matrix and distortion coefficients from disk
	// (cvLoad returns NULL on failure).
	_intrinsic_matrix  = (CvMat *)cvLoad("Intrinsics.xml");
	_distortion_coeffs = (CvMat *)cvLoad("Distortion.xml");

	if (_intrinsic_matrix && _distortion_coeffs) {
		// Create the undistortion map images
		_mapx = cvCreateImage( image_size, IPL_DEPTH_32F, 1 );
		_mapy = cvCreateImage( image_size, IPL_DEPTH_32F, 1 );

		// Fill the undistortion maps
		cvInitUndistortMap (_intrinsic_matrix, _distortion_coeffs, _mapx, _mapy);

		// Mark calibration as already complete.
		_successes = _n_boards + 1;
	}
}
예제 #10
0
// Build (and cache) the undistortion lookup maps for a width x height
// image with the given row origin flag. Re-entrant: returns immediately
// if maps of the same size already exist.
void Undistortion::initMap( int width, int height, int origin )
{
	// skip if already initialized with same values
	if ( m_pMapX && m_pMapX->width == width && m_pMapX->height == height )
		return;
		
	LOG4CPP_INFO( logger, "Creating undistortion map" );
	LOG4CPP_DEBUG( logger, "coeffs=" << m_coeffs );
	LOG4CPP_DEBUG( logger, "intrinsic=" << m_intrinsics );
	
	// copy ublas to OpenCV parameters
	CvMat* pCvCoeffs = cvCreateMat( 1, 8, CV_32FC1 );
	for ( unsigned i = 0; i < 8; i++ )
		pCvCoeffs->data.fl[ i ] = static_cast< float >( m_coeffs( i ) );
		
	CvMat* pCvIntrinsics = cvCreateMat( 3, 3, CV_32FC1 );
	for ( unsigned i = 0; i < 3; i++ )
		for ( unsigned j = 0; j < 3; j++ )
			cvmSet( pCvIntrinsics, i, j, m_intrinsics( i, j ) );
	
	// compensate for left-handed OpenCV coordinate frame
	// (negates the third column of the intrinsic matrix)
	for ( unsigned i = 0; i < 3; i++ )
		cvmSet( pCvIntrinsics, i, 2, cvmGet( pCvIntrinsics, i, 2 ) * -1 );
	
	// compensate if origin==0: flip the principal point's y coordinate
	// and the sign of the third distortion coefficient
	if ( !origin )
	{
		cvmSet( pCvIntrinsics, 1, 2, height - 1 - cvmGet( pCvIntrinsics,  1, 2 ) );
		cvmSet( pCvCoeffs, 0, 2, cvmGet( pCvCoeffs, 0, 2 ) * -1 );
	}

	// initialize the distortion map
	// create map images (smart-pointer members, so old maps are released)
	m_pMapX.reset( new Image( width, height, 1, IPL_DEPTH_32F ) );
	m_pMapY.reset( new Image( width, height, 1, IPL_DEPTH_32F ) );
	
	cvInitUndistortMap( pCvIntrinsics, pCvCoeffs, *m_pMapX, *m_pMapY );
	
	LOG4CPP_TRACE( logger, "origin=" << origin );
	// log where pixel (0,0) is mapped from, as a sanity check
	Math::Vector< double, 2 > startPixel( *reinterpret_cast< float* >( m_pMapX->imageData ), *reinterpret_cast< float* >( m_pMapY->imageData ) );
	LOG4CPP_DEBUG( logger, "first pixel (0, 0) mapped from " << startPixel );
		
	// release data
	cvReleaseMat( &pCvCoeffs );
	cvReleaseMat( &pCvIntrinsics );
}	
예제 #11
0
// Undistort `src` in place using the calibration stored in the given
// intrinsics/distortion files, and return `src`. If either file fails to
// load, `src` is returned unmodified.
IplImage* undistIm( IplImage* src, const char* filSetIntr , const char* filSetDist) {
	
	// ###############

	// Load the calibration matrices (cvLoad returns NULL on failure).
	CvMat *intrinsic = (CvMat*)cvLoad( filSetIntr ); 
	CvMat *distortion = (CvMat*)cvLoad( filSetDist );
	if( !intrinsic || !distortion ) {
		// Nothing useful can be done without both matrices; previously a
		// NULL here would crash cvInitUndistortMap.
		if( intrinsic ) cvReleaseMat( &intrinsic );
		if( distortion ) cvReleaseMat( &distortion );
		return src;
	}
	
	// Build the undistort map used for this frame.
	IplImage* mapx = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 1 );
	IplImage* mapy = cvCreateImage( cvGetSize(src), IPL_DEPTH_32F, 1 );
	cvInitUndistortMap( intrinsic, distortion, mapx, mapy );
	
	// cvRemap cannot run in place: remap from a clone back into src.
	// cvCloneImage also preserves src's depth/channels (the old code
	// hard-coded an 8U/3-channel temporary).
	IplImage *t = cvCloneImage( src );
	cvRemap( t, src, mapx, mapy ); // Undistort image
	cvReleaseImage( &t );
	cvReleaseImage( &mapx );
	cvReleaseImage( &mapy );

	// Release the matrices allocated by cvLoad (previously leaked on
	// every call).
	cvReleaseMat( &intrinsic );
	cvReleaseMat( &distortion );

	// ##############

	return src;
}
//--------------------------------------------------------------------------------------
// Name: CVLoadCamCal	
// Desc: Loads camera calibration files and precomputes the undistortion
//       maps used for every subsequent frame.
//--------------------------------------------------------------------------------------
bool GLCVUtils::CVLoadCamCal (void)
{
	// Restore the intrinsic matrix and distortion coefficients from disk.
	_intrinsic = (CvMat*) cvLoad("Intrinsics.xml");
	_distortion = (CvMat*) cvLoad("Distortion.xml");

	// Either file missing or malformed -> report which one and bail out.
	if (_intrinsic == NULL)
	{
		printf ("\nError loading Intrinsics.xml");
		return FALSE;
	}

	if (_distortion == NULL)
	{
		printf ("\nError loading Distortion.xml");
		return FALSE;
	}

	// Build the texture-sized undistortion lookup maps for cvRemap().
	_mapx= cvCreateImage (cvSize(TEXTURE_W, TEXTURE_H), IPL_DEPTH_32F, 1);
	_mapy = cvCreateImage (cvSize(TEXTURE_W, TEXTURE_H), IPL_DEPTH_32F, 1);
	cvInitUndistortMap (_intrinsic, _distortion, _mapx, _mapy);

	return TRUE;
}
예제 #13
0
// Construct a camera wrapper: select the camera backend from its name,
// load the calibration files, open the capture device (except for the
// "ach" backend), and precompute the undistortion maps.
CCamera::CCamera(std::string &cam_name, std::string &intrinsic, std::string &distortion, int width, int height)
{
  // Map the camera name onto an internal type id; unknown names fall
  // back to the normal (default) camera.
  if(cam_name.compare("normal") == 0)
    cam_type_ = CAM_NORMAL;
  else if(cam_name.compare("fire-i") == 0)
    cam_type_ = CAM_FIREI;
  else if(cam_name.compare("flea") == 0)
    cam_type_ = CAM_FLEA;
  else if(cam_name.compare("openni") == 0)
    cam_type_ = CAM_OPENNI;
  else if(cam_name.compare("ach") == 0)
    cam_type_ = CAM_ACH;
  else
  {
    std::cerr << "Unknown camera type. Force to use normal camera type" << std::endl;
    cam_type_ = CAM_NORMAL;
  }

  // Load calibration data (cvLoad returns NULL when a file is missing).
  intrinsic_    = (CvMat*)cvLoad(intrinsic.c_str());
  distortion_   = (CvMat*)cvLoad(distortion.c_str());

  if(cam_type_ == CAM_OPENNI)
  {
    video_capture_.open(CV_CAP_OPENNI);
    if(!video_capture_.isOpened())
    {
      std::cerr << "Openni sensor cannot be openned" << std::endl;
      return;
    }

    video_capture_.set(CV_CAP_OPENNI_IMAGE_GENERATOR_OUTPUT_MODE, CV_CAP_OPENNI_VGA_30HZ);
    if(video_capture_.get(CV_CAP_OPENNI_IMAGE_GENERATOR_PRESENT))
    {
      // Sanity-check that the device delivers the requested resolution.
      int w = static_cast<int>(video_capture_.get(CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_FRAME_WIDTH));
      int h = static_cast<int>(video_capture_.get(CV_CAP_OPENNI_IMAGE_GENERATOR + CV_CAP_PROP_FRAME_HEIGHT));
      assert(w == width);
      assert(h == height);
    }
    else
    {
      std::cerr << "Device doesn't contain image generator." << std::endl;
      return;
    }
  }
  else if (cam_type_ != CAM_ACH)
  {
    capture_ = cvCaptureFromCAM(0);
    assert(capture_);

    double fps = cvGetCaptureProperty(capture_, CV_CAP_PROP_FPS);
    std::cout << "Camera FPS: " << (int)fps << std::endl;

    // Force to (width x height)
    cvSetCaptureProperty(capture_, CV_CAP_PROP_FRAME_WIDTH, width);
    cvSetCaptureProperty(capture_, CV_CAP_PROP_FRAME_HEIGHT, height);

    CvSize size = cvSize(
      (int)cvGetCaptureProperty(capture_, CV_CAP_PROP_FRAME_WIDTH),
      (int)cvGetCaptureProperty(capture_, CV_CAP_PROP_FRAME_HEIGHT)
      );

    assert(size.width == width);
    assert(size.height == height);
  }

  CvSize size = cvSize(width, height);
  // create the undistort map images
  img_mapx_ = cvCreateImage(size, IPL_DEPTH_32F, 1);
  img_mapy_ = cvCreateImage(size, IPL_DEPTH_32F, 1);
  // init the undistort map -- guard against missing calibration files:
  // cvInitUndistortMap would crash on NULL matrices.
  if(intrinsic_ && distortion_)
    cvInitUndistortMap(intrinsic_, distortion_, img_mapx_, img_mapy_);
  else
    std::cerr << "Failed to load camera calibration files" << std::endl;
  // create the main image
  img_input_ = cvCreateImage(size, IPL_DEPTH_8U, 3);
  img_edge_ = NULL;

  width_ = width;
  height_ = height;
  std::cout << "image resolution: (" << width_ << ", " << height_ << ")" << std::endl;

  color_ = true;
}
예제 #14
0
// Undistort each camera's image using the stored calibration. When the
// filter is not calibrated, images are simply copied through. Handles
// in-place calls (src == dst) via an internal scratch buffer, and caches
// per-camera undistortion maps across calls.
bool CvCalibFilter::Undistort( CvMat** srcarr, CvMat** dstarr )
{
    int i;

    if( !srcarr || !dstarr )
    {
        assert(0);
        return false;
    }

    if( isCalibrated )
    {
        for( i = 0; i < cameraCount; i++ )
        {
            if( srcarr[i] && dstarr[i] )
            {
                CvMat src_stub, *src;
                CvMat dst_stub, *dst;

                src = cvGetMat( srcarr[i], &src_stub );
                dst = cvGetMat( dstarr[i], &dst_stub );

                if( src->data.ptr == dst->data.ptr )
                {
                    // In-place remap is not supported: copy the source into
                    // a scratch buffer first. The buffer is recreated when
                    // missing, when the size differs, or when the type
                    // differs (BUGFIX: the type test was previously not
                    // negated, recreating on a match and reusing a buffer
                    // of the wrong type).
                    if( !undistImg || undistImg->width != src->width ||
                        undistImg->height != src->height ||
                        !CV_ARE_TYPES_EQ( undistImg, src ))
                    {
                        cvReleaseMat( &undistImg );
                        undistImg = cvCreateMat( src->height, src->width, src->type );
                    }

                    cvCopy( src, undistImg );
                    src = undistImg;
                }

            #if 1
                {
                CvMat A = cvMat( 3, 3, CV_32FC1, cameraParams[i].matrix );
                CvMat k = cvMat( 1, 4, CV_32FC1, cameraParams[i].distortion );

                // Lazily (re)build this camera's undistortion maps when
                // missing or when the image size changed.
                if( !undistMap[i][0] || undistMap[i][0]->width != src->width ||
                     undistMap[i][0]->height != src->height )
                {
                    cvReleaseMat( &undistMap[i][0] );
                    cvReleaseMat( &undistMap[i][1] );
                    undistMap[i][0] = cvCreateMat( src->height, src->width, CV_32FC1 );
                    undistMap[i][1] = cvCreateMat( src->height, src->width, CV_32FC1 );
                    cvInitUndistortMap( &A, &k, undistMap[i][0], undistMap[i][1] );
                }

                cvRemap( src, dst, undistMap[i][0], undistMap[i][1] );
            #else
                cvUndistort2( src, dst, &A, &k );
            #endif
                }
            }
        }
    }
    else
    {
        // Not calibrated: pass images through unchanged.
        for( i = 0; i < cameraCount; i++ )
        {
            if( srcarr[i] != dstarr[i] )
                cvCopy( srcarr[i], dstarr[i] );
        }
    }


    return true;
}
예제 #15
0
void bird_eye() {
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CvMat *intrinsic = (CvMat*) cvLoad("Intrinsics.xml");
	CvMat *distortion = (CvMat*) cvLoad("Distortion.xml");

	IplImage* image = cvLoadImage("./Resource/bird-eye.jpg", 1);
	IplImage* gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	cvCvtColor(image, gray_image, CV_BGR2GRAY);

	IplImage* mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage* mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	
	cvInitUndistortMap(
			intrinsic, 
			distortion, 
			mapx, 
			mapy
	);
	
	IplImage* t = cvCloneImage(image);

	cvRemap(t, image, mapx, mapy);
	
	cvNamedWindow("Chessboard");
	cvShowImage("Chessboard", image);
	int c = cvWaitKey(-1);
	CvPoint2D32f* corners = new CvPoint2D32f[board_n];
	int corner_count = 0;
	
	int found = cvFindChessboardCorners(
			image, 
			board_sz, 
			corners, 
			&corner_count, 
			CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
	);
	
	if(!found){
		printf("couldn't aquire chessboard!\n");
		return;
	}
	
	cvFindCornerSubPix(
			gray_image, 
			corners, 
			corner_count, 
			cvSize(11, 11), 
			cvSize(-1, -1), 
			cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1)
	);

	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;			objPts[0].y = 0;
	objPts[1].x = board_w - 1;	objPts[1].y = 0;
	objPts[2].x = 0;			objPts[2].y = board_h - 1;
	objPts[3].x = board_w - 1;	objPts[3].y = board_h - 1;
	imgPts[0]   = corners[0];
	imgPts[1]	= corners[board_w - 1];
	imgPts[2]	= corners[(board_h - 1) * board_w];
	imgPts[3]	= corners[(board_h - 1) * board_w + board_w - 1];

	cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
	cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

	cvDrawChessboardCorners(
		image,
		board_sz,
		corners,
		corner_count,
		found
	);

	cvShowImage("Chessboard", image);

	CvMat *H = cvCreateMat(3, 3, CV_32F);
	cvGetPerspectiveTransform(objPts, imgPts, H);

	float z = 25;
	int key = 0;
	IplImage * birds_image = cvCloneImage(image);
	cvNamedWindow("Birds_Eye");

	while(key != 27) {
		CV_MAT_ELEM(*H, float, 2, 2) = z;

		cvWarpPerspective(
			image,
			birds_image,
			H,
			CV_INTER_LINEAR| CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS
		);

		cvShowImage("Birds_Eye", birds_image);

		key = cvWaitKey();
		if(key == 'u') z += 0.5;
		if(key == 'd') z -= 0.5;
	}

	cvSave("H.xml", H);
}
예제 #16
0
// Grab frames from the capture until a full chessboard is detected in the
// undistorted view; the user may then press 's' to save the (original,
// distorted) frame as ./Resource/bird-eye.jpg. 'p' pauses the loop.
void get_bird_eye(CvCapture* capture) {
	printf("haha\n");
	//get bird_eye picture
	cvNamedWindow("Get_birdeye");
	
	CvMat *intrinsic = (CvMat*) cvLoad("Intrinsics.xml");
	CvMat *distortion = (CvMat*) cvLoad("Distortion.xml");
	// cvLoad returns NULL on failure; cvInitUndistortMap would crash.
	if(!intrinsic || !distortion) {
		printf("couldn't load calibration files!\n");
		if(intrinsic) cvReleaseMat(&intrinsic);
		if(distortion) cvReleaseMat(&distortion);
		return;
	}
	
	IplImage *image = cvQueryFrame(capture);
	IplImage *gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	
	int board_n = board_w * board_h;
	
	IplImage* mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage* mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	
	cvInitUndistortMap(
			intrinsic, 
			distortion, 
			mapx, 
			mapy
	);
	
	CvSize board_sz = cvSize(board_w, board_h);
	CvPoint2D32f* corners = new CvPoint2D32f[board_n];
	
	int frame = 0;
	int corner_count;
	bool catch_bird = false;


	while(!catch_bird) {
		// Keep an unmodified copy of the frame: this is what gets saved.
		IplImage *t = cvCloneImage(image);
		if(frame++ % board_dt == 0) {
			// cvRemap cannot run in place: undistort via a temporary clone
			// (this inner clone was previously leaked on every pass).
			IplImage* undist_src = cvCloneImage(image);
			cvRemap(undist_src, image, mapx, mapy);
			cvReleaseImage(&undist_src);
			
			int found = cvFindChessboardCorners(
					image, 
					board_sz, 
					corners, 
					&corner_count,
					CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
			);

			cvCvtColor(
					image, 
					gray_image, 
					CV_RGB2GRAY
			);
			
			cvFindCornerSubPix(
					gray_image, 
					corners, 
					corner_count, 
					cvSize(11,11), 
					cvSize(-1, -1), 
					cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1)
			);
			
			cvDrawChessboardCorners(
					image, 
					board_sz, 
					corners, 
					corner_count, 
					found
			);
			
			cvShowImage("Get_birdeye", image);
			//get a good board, add to data
			if(corner_count == board_n) {
				catch_bird = true;
				printf("That's it!\n");
			}
		}
		int c;
		if(catch_bird) c = cvWaitKey(-1);
		else c = cvWaitKey(15);
		if(catch_bird && c == 's') {
			cvSaveImage("./Resource/bird-eye.jpg", t, 0);
			printf("save at ./Resource/bird-eye.jpg\n");
		}
		else catch_bird = false;
		if(c == 'p') {
			c = 0;
			while(c!='p' && c!= 27){
				c = cvWaitKey(250);
			}
		}
		cvReleaseImage(&t);	// previously leaked on every iteration
		// the frame buffer is owned by the capture: do NOT release it
		image = cvQueryFrame(capture);
	}

	// Free per-call resources (previously all leaked).
	delete [] corners;
	cvReleaseImage(&mapx);
	cvReleaseImage(&mapy);
	cvReleaseImage(&gray_image);
	cvReleaseMat(&intrinsic);
	cvReleaseMat(&distortion);
}
예제 #17
0
int main(int argc, char *argv[])
{
	if (argc != 6) {
		printf("\nERROR: too few parameters\n");
		help();
		return -1;
	}
	help();
	//INPUT PARAMETERS:
	int board_w = atoi(argv[1]);
	int board_h = atoi(argv[2]);
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CvMat *intrinsic = (CvMat *) cvLoad(argv[3]);
	CvMat *distortion = (CvMat *) cvLoad(argv[4]);
	IplImage *image = 0, *gray_image = 0;
	if ((image = cvLoadImage(argv[5])) == 0) {
		printf("Error: Couldn't load %s\n", argv[5]);
		return -1;
	}
	gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	cvCvtColor(image, gray_image, CV_BGR2GRAY);

	//UNDISTORT OUR IMAGE
	IplImage *mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage *mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
	IplImage *t = cvCloneImage(image);
	cvRemap(t, image, mapx, mapy);

	//GET THE CHECKERBOARD ON THE PLANE
	cvNamedWindow("Checkers");
	CvPoint2D32f *corners = new CvPoint2D32f[board_n];
	int corner_count = 0;
	int found = cvFindChessboardCorners(image,
										board_sz,
										corners,
										&corner_count,
										CV_CALIB_CB_ADAPTIVE_THRESH |
										CV_CALIB_CB_FILTER_QUADS);
	if (!found) {
		printf
			("Couldn't aquire checkerboard on %s, only found %d of %d corners\n",
			 argv[5], corner_count, board_n);
		return -1;
	}
	//Get Subpixel accuracy on those corners
	cvFindCornerSubPix(gray_image, corners, corner_count,
					   cvSize(11, 11), cvSize(-1, -1),
					   cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30,
									  0.1));

	//GET THE IMAGE AND OBJECT POINTS:
	//Object points are at (r,c): (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1)
	//That means corners are at: corners[r*board_w + c]
	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;
	objPts[0].y = 0;
	objPts[1].x = board_w - 1;
	objPts[1].y = 0;
	objPts[2].x = 0;
	objPts[2].y = board_h - 1;
	objPts[3].x = board_w - 1;
	objPts[3].y = board_h - 1;
	imgPts[0] = corners[0];
	imgPts[1] = corners[board_w - 1];
	imgPts[2] = corners[(board_h - 1) * board_w];
	imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1];

	//DRAW THE POINTS in order: B,G,R,YELLOW
	cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
	cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

	//DRAW THE FOUND CHECKERBOARD
	cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
	cvShowImage("Checkers", image);

	//FIND THE HOMOGRAPHY
	CvMat *H = cvCreateMat(3, 3, CV_32F);
	CvMat *H_invt = cvCreateMat(3, 3, CV_32F);
	cvGetPerspectiveTransform(objPts, imgPts, H);

	//LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
	float Z = 25;
	int key = 0;
	IplImage *birds_image = cvCloneImage(image);
	cvNamedWindow("Birds_Eye");
	while (key != 27) {			//escape key stops
		CV_MAT_ELEM(*H, float, 2, 2) = Z;
//     cvInvert(H,H_invt); //If you want to invert the homography directly
//     cvWarpPerspective(image,birds_image,H_invt,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
		//USE HOMOGRAPHY TO REMAP THE VIEW
		cvWarpPerspective(image, birds_image, H,
						  CV_INTER_LINEAR + CV_WARP_INVERSE_MAP +
						  CV_WARP_FILL_OUTLIERS);
		cvShowImage("Birds_Eye", birds_image);
		key = cvWaitKey();
		if (key == 'u')
			Z += 0.5;
		if (key == 'd')
			Z -= 0.5;
	}

	//SHOW ROTATION AND TRANSLATION VECTORS
	CvMat *image_points = cvCreateMat(4, 1, CV_32FC2);
	CvMat *object_points = cvCreateMat(4, 1, CV_32FC3);
	for (int i = 0; i < 4; ++i) {
		CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
		CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) =
			cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
	}

	CvMat *RotRodrigues = cvCreateMat(3, 1, CV_32F);
	CvMat *Rot = cvCreateMat(3, 3, CV_32F);
	CvMat *Trans = cvCreateMat(3, 1, CV_32F);
	cvFindExtrinsicCameraParams2(object_points, image_points,
								 intrinsic, distortion, RotRodrigues, Trans);
	cvRodrigues2(RotRodrigues, Rot);

	//SAVE AND EXIT
	cvSave("Rot.xml", Rot);
	cvSave("Trans.xml", Trans);
	cvSave("H.xml", H);
	cvInvert(H, H_invt);
	cvSave("H_invt.xml", H_invt);	//Bottom row of H invert is horizon line
	return 0;
}
// RTC periodic callback. Reads a camera frame from the InPort and drives a
// chessboard calibration pipeline keyed on the space bar:
//   1) grab corner sets and estimate the intrinsic matrix and lens
//      distortion coefficients (cvCalibrateCamera2);
//   2) estimate the extrinsic parameters for the current board pose
//      (cvFindExtrinsicCameraParams2) and build the undistortion maps;
//   3) undistort each frame and publish a bird's-eye (top-down) warp of the
//      board plane on the OutPort.
// Heavy use of file-scope state (inputImage_buff, corners, intrinsicMatrix,
// findFlag, InParameter, outParameter, ...) shared across invocations.
RTC::ReturnCode_t ImageCalibration::onExecute(RTC::UniqueId ec_id)
{	
	
	board_sz = cvSize(m_board_w, m_board_h);
	
	// Compute the calibration pattern.
	if (m_inputImageIn.isNew()) {

		m_inputImageIn.read();

		// Latch the most recent key press from the key InPort, if any.
		if(m_keyIn.isNew()){
			m_keyIn.read();
			key = (int)m_key.data;
		}
		
		// (Re)allocate every working buffer when the input frame size changes.
		// NOTE(review): the previously allocated images/matrices and the
		// `corners` array are not released before being overwritten, so a
		// resize leaks the old buffers — confirm and fix upstream.
		if(g_temp_w != m_inputImage.width || g_temp_h != m_inputImage.height){
		
			inputImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			outputImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			tempImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			undistortionImage = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			birds_image = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			
			intrinsicMatrix = cvCreateMat(3,3,CV_64FC1);
			distortionCoefficient = cvCreateMat(4,1,CV_64FC1);
			
			captureCount = 0;
			findFlag = 0;

			mapx = cvCreateImage( cvSize(m_inputImage.width, m_inputImage.height), IPL_DEPTH_32F, 1);
			mapy = cvCreateImage( cvSize(m_inputImage.width, m_inputImage.height), IPL_DEPTH_32F, 1);

			corners = new CvPoint2D32f[m_board_w * m_board_h];
			
			g_temp_w = m_inputImage.width;
			g_temp_h = m_inputImage.height;
		
		}

		// Start capturing: copy the InPort pixel buffer into the working image.
		// NOTE(review): assumes the InPort delivers 8-bit 3-channel pixels with
		// no row padding (matching the IplImage layout) — confirm.
		memcpy(inputImage_buff->imageData,(void *)&(m_inputImage.pixels[0]), m_inputImage.pixels.length());

//		tempImage_buff = cvCloneImage(inputImage_buff);
		// Echo the raw frame on the OutPort.
		int len = inputImage_buff->nChannels * inputImage_buff->width * inputImage_buff->height;
		m_origImage.pixels.length(len);
		
		memcpy((void *)&(m_origImage.pixels[0]), inputImage_buff->imageData, len);
		m_origImage.width = inputImage_buff->width;
		m_origImage.height = inputImage_buff->height;

		m_origImageOut.write();
		
		// Capture-preview window (disabled).
		//cvShowImage("Capture", inputImage_buff);
		cvWaitKey(1);
		
		// When the space bar is pressed, collect sample frames for intrinsic
		// calibration.
		if (key == ' ') {
			
			tempImage_buff = cvCloneImage(inputImage_buff);
			// Grayscale scratch image for the corner detector.
			IplImage *grayImage = cvCreateImage(cvGetSize(tempImage_buff), 8, 1);

			// Allocate the calibration matrices.
			CvMat *worldCoordinates = cvCreateMat((m_board_w * m_board_h) * NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); // world coordinates
			CvMat *imageCoordinates = cvCreateMat((m_board_w * m_board_h) * NUM_OF_BACKGROUND_FRAMES ,2, CV_64FC1); // image coordinates
			CvMat *pointCounts = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 1, CV_32SC1); // corner counts per view
			CvMat *rotationVectors = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); // rotation vectors
			CvMat *translationVectors = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); 

			// Fill in the world coordinates: a flat grid spaced UNIT apart,
			// repeated once per captured view (z = 0 for a planar board).
			for (int i = 0; i < NUM_OF_BACKGROUND_FRAMES; i++){
				for ( int j = 0; j < (m_board_w * m_board_h); j++) {
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 0, (j % m_board_w) * UNIT);
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 1, (j / m_board_w) * UNIT);
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 2, 0.0);
				}
			}

			// Every view contributes the full board's worth of corners.
			for(int i = 0; i < NUM_OF_BACKGROUND_FRAMES; i++){
				cvSetReal2D(pointCounts, i, 0, (m_board_w * m_board_h));
			}
			
			// Detect the chessboard corners in the current frame.
			findFlag = findCorners(tempImage_buff, grayImage, corners);

			if (findFlag != 0) {
			
				// All corners were detected: record the image coordinates.
				// NOTE(review): this loop copies the SAME corner set into every
				// one of the NUM_OF_BACKGROUND_FRAMES slots in a single pass —
				// presumably one set per keypress was intended; confirm.
				for (;;){
					for (int i = 0; i < (m_board_w * m_board_h); i++){
 						cvSetReal2D(imageCoordinates, captureCount * (m_board_w * m_board_h) + i, 0, corners[i].x);
						cvSetReal2D(imageCoordinates, captureCount * (m_board_w * m_board_h) + i, 1, corners[i].y);
					}
				
					captureCount++;    

					printf("%d枚目キャプチャしました\n", captureCount);

					if (captureCount == NUM_OF_BACKGROUND_FRAMES) {
						// Once the configured number of pattern views has been
						// captured, estimate the camera parameters.
						cvCalibrateCamera2(
							worldCoordinates,
							imageCoordinates,
							pointCounts,
							cvGetSize(inputImage_buff),
							intrinsicMatrix,
							distortionCoefficient,
							rotationVectors,
							translationVectors,
							CALIBRATE_CAMERA_FLAG
						);
						
						// Dump the results as text.
						printf("\nレンズ歪み係数\n");
						saveRenseMatrix(distortionCoefficient);
						printMatrix("%lf", distortionCoefficient);
						
						//m_renseParameter.data = renseParameters;
												
						printf("\n内部パラメータ\n");
						saveInternalParameterMatrix(intrinsicMatrix);
						printMatrix("%lf ", intrinsicMatrix);

						//m_internalParameter.data = internalParameter;
						
						captureCount = 0;
						break;
						
					}
				}
			}

			// Record whether the intrinsic stage succeeded.
			if (findFlag != 0){
				InParameter = 1;
			}else if (findFlag == 0) {
				InParameter = 0;
			}
			
			// Free the per-keypress matrices and scratch image.
			cvReleaseMat(&worldCoordinates);
			cvReleaseMat(&imageCoordinates);
			cvReleaseMat(&pointCounts);
			cvReleaseMat(&rotationVectors);
			cvReleaseMat(&translationVectors);
			cvReleaseImage(&grayImage);

		}
		g_temp_w = m_inputImage.width;
		g_temp_h = m_inputImage.height;

	}
	// Acquire the extrinsic (pose) parameters.
	if (key == ' ' && m_inputImageIn.isNew() && InParameter == 1) {

		// Allocate the matrices for a single view.
		CvMat *worldCoordinates = cvCreateMat((m_board_w * m_board_h), 3, CV_64FC1); // world coordinates
		CvMat *imageCoordinates = cvCreateMat((m_board_w * m_board_h), 2, CV_64FC1); // image coordinates
		CvMat *rotationVectors = cvCreateMat(1, 3, CV_64FC1); // rotation vector
		CvMat *rotationMatrix = cvCreateMat(3, 3, CV_64FC1); // rotation matrix
		CvMat *translationVectors = cvCreateMat(1, 3, CV_64FC1); 

		// World coordinates: flat UNIT-spaced grid, z = 0.
		for (int i = 0; i < (m_board_w * m_board_h); i++){
			cvSetReal2D(worldCoordinates, i, 0, (i % m_board_w) * UNIT);
			cvSetReal2D(worldCoordinates, i, 1, (i / m_board_w) * UNIT);
			cvSetReal2D(worldCoordinates, i, 2, 0.0);
		}
	
		cvWaitKey( 1 );
	
		// If the space key was pressed:
		if ( findFlag != 0 ) {
			// All corners were detected — set the image coordinates from the
			// corner set found by the intrinsic stage above.
			for ( int i = 0; i < (m_board_w * m_board_h); i++ ){
				cvSetReal2D( imageCoordinates, i, 0, corners[i].x);
				cvSetReal2D( imageCoordinates, i, 1, corners[i].y);
			}

			// Estimate the extrinsic parameters.
			cvFindExtrinsicCameraParams2(
				worldCoordinates,
				imageCoordinates,
				intrinsicMatrix,
				distortionCoefficient,
				rotationVectors,
				translationVectors
			);

			// Convert the Rodrigues rotation vector into a rotation matrix.
			cvRodrigues2( rotationVectors, rotationMatrix, NULL );

			printf( "\n外部パラメータ\n" );
			printExtrinsicMatrix( rotationMatrix, translationVectors );
			saveExternalParameterMatrix(rotationMatrix, translationVectors);

			// Publish the formatted parameter strings over the CORBA ports.
			m_externalParameter.data = CORBA::string_dup(externalParameter);
			m_renseParameter.data = CORBA::string_dup(renseParameters);
			m_internalParameter.data = CORBA::string_dup(internalParameter);
						
		}
		// Free the per-view matrices.
		cvReleaseMat( &worldCoordinates );
		cvReleaseMat( &imageCoordinates );
		cvReleaseMat( &rotationVectors );
		cvReleaseMat( &rotationMatrix );
		cvReleaseMat( &translationVectors );
		
		// Initialize the X/Y undistortion maps for the remap stage.
		cvInitUndistortMap(
			intrinsicMatrix,
			distortionCoefficient,
			mapx,
			mapy
		);
		// Flag that the extrinsic parameters are available.
		outParameter = 1;
		key = 0;
				
	 }
	
	// Once both intrinsic and extrinsic parameters are available:
	if (InParameter == 1 && outParameter == 1) {

		// Generate the lens-distortion-corrected image.
		cvUndistort2(
			inputImage_buff,
			undistortionImage,
			intrinsicMatrix,
			distortionCoefficient
		);

		//cvShowImage("歪み補正", undistortionImage);

		// Publish the corrected image on the OutPort (disabled).
		//int len = undistortionImage->nChannels * undistortionImage->width * undistortionImage->height;
		//m_calbImage.pixels.length(len);
		
		// Memory-copy the corrected frame into the OutPort buffer (disabled).
		//memcpy((void *)&(m_calbImage.pixels[0]), undistortionImage->imageData, len);
		//m_calbImageOut.write();
		
		// Object-plane coordinates of the four outer board corners for the
		// bird's-eye homography.
		objPts[0].x = 0;					objPts[0].y = 0;
		objPts[1].x = m_board_w-1;			objPts[1].y = 0;
		objPts[2].x = 0;					objPts[2].y = m_board_h-1;
		objPts[3].x = m_board_w-1;			objPts[3].y = m_board_h-1;
		
		// Matching detected corners (board stored row-major).
		imgPts[0] = corners[0];
		imgPts[1] = corners[m_board_w - 1];
		imgPts[2] = corners[(m_board_h - 1) * m_board_w];
		imgPts[3] = corners[(m_board_h - 1) * m_board_w + m_board_w - 1];
		
		// Mark the selected corners with colored circles.
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0,0,255), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0,255,0), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255,0,0), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255,255,0), 3);

		// Homography from the ground plane to the image.
		// NOTE(review): H is created each cycle and released at the end of
		// this branch, so no leak here — unlike the per-call globals above.
		CvMat *H = cvCreateMat(3, 3, CV_32F);
		cvGetPerspectiveTransform(objPts, imgPts, H);
		
		// Set the virtual camera height (scales the projective term).
		CV_MAT_ELEM(*H, float, 2, 2) = m_camera_Height;
		
		// Perform the warp into the bird's-eye view.
		cvWarpPerspective(inputImage_buff, birds_image, H, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
		
		// Publish the bird's-eye view on the OutPort.
		int len = birds_image->nChannels * birds_image->width * birds_image->height;
		m_birdImage.pixels.length(len);
		memcpy((void *)&(m_birdImage.pixels[0]), birds_image->imageData, len);

		m_birdImage.width = inputImage_buff->width;
		m_birdImage.height = inputImage_buff->height;

		m_birdImageOut.write();

		cvWaitKey(10);

		//cvShowImage("Bird_Eye", birds_image);
		cvReleaseMat(&H);

		g_temp_w = m_inputImage.width;
		g_temp_h = m_inputImage.height;

		key = 0;

	}
// 예제 #19 (Example #19)
// 0
/// Undistorts `image` in place, then renders a bird's-eye (top-down) view of
/// the chessboard plane into the `BVwindow` window.
///
/// @param intrinsic  3x3 camera intrinsic matrix.
/// @param distortion lens distortion coefficients.
/// @param image      input frame; rectified (undistorted) in place.
/// @param corners    board_w * board_h detected chessboard corners, row-major.
///
/// Side effect: reassigns the global homography `H` (the previous matrix is
/// released first). Fixes vs. the original: float comparisons no longer go
/// through the int-truncating abs() overload, and the per-call temporaries
/// (mapx, mapy, t, birdsview_image) plus the stale H are now released.
void BirdsIview(CvMat* intrinsic,CvMat* distortion,IplImage* image,CvPoint2D32f* corners){

	// Build the undistortion maps for this frame size.
	IplImage* mapx = cvCreateImage( cvGetSize(image), IPL_DEPTH_32F, 1 );
	IplImage* mapy = cvCreateImage( cvGetSize(image), IPL_DEPTH_32F, 1 );
	cvInitUndistortMap(intrinsic, distortion, mapx, mapy);

	// Rectify in place: cvRemap needs a distinct source, so work from a clone.
	IplImage *t = cvCloneImage(image);
	cvRemap(t, image, mapx, mapy, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(0));

	// Object-plane coordinates of the four outer board corners.
	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;         objPts[0].y = 0;
	objPts[1].x = board_w-1; objPts[1].y = 0;
	objPts[2].x = 0;         objPts[2].y = board_h-1;
	objPts[3].x = board_w-1; objPts[3].y = board_h-1;

	// Arrange the image points in "Z" orientation regardless of how the board
	// was detected. Compare squared deltas: |dx| > |dy| <=> dx*dx > dy*dy.
	// (The original called abs() on floats, which can bind to the integer
	// overload and truncate toward zero.)
	const float dx = corners[0].x - corners[board_w-1].x;
	const float dy = corners[0].y - corners[board_w-1].y;

	if (dx*dx > dy*dy) {   // first board row runs mostly horizontally
		if ((corners[board_w-1].x-corners[0].x) > 0){
			imgPts[0] = corners[0];
			imgPts[1] = corners[board_w-1];
			imgPts[2] = corners[(board_h-1)*board_w];
			imgPts[3] = corners[(board_h-1)*board_w + board_w-1];
		}
		if ((corners[board_w-1].x-corners[0].x) < 0){
			imgPts[3] = corners[0];
			imgPts[2] = corners[board_w-1];
			imgPts[1] = corners[(board_h-1)*board_w];
			imgPts[0] = corners[(board_h-1)*board_w + board_w-1];
		}
	}

	if (dx*dx < dy*dy) {   // first board row runs mostly vertically
		if ((corners[board_w-1].y-corners[0].y) > 0){
			imgPts[1] = corners[0];
			imgPts[3] = corners[board_w-1];
			imgPts[0] = corners[(board_h-1)*board_w];
			imgPts[2] = corners[(board_h-1)*board_w + board_w-1];
		}
		if ((corners[board_w-1].y-corners[0].y) < 0){
			imgPts[2] = corners[0];
			imgPts[0] = corners[board_w-1];
			imgPts[3] = corners[(board_h-1)*board_w];
			imgPts[1] = corners[(board_h-1)*board_w + board_w-1];
		}
	}

	// Draw the ordered points: blue, green, red, yellow.
	cvCircle(image,cvPointFrom32f(imgPts[0]),9,CV_RGB(0,0,255),3,8,0);
	cvCircle(image,cvPointFrom32f(imgPts[1]),9,CV_RGB(0,255,0),3,8,0);
	cvCircle(image,cvPointFrom32f(imgPts[2]),9,CV_RGB(255,0,0),3,8,0);
	cvCircle(image,cvPointFrom32f(imgPts[3]),9,CV_RGB(255,255,0),3,8,0);

	// Homography from the ground plane to the image. Release the previous
	// global H first (file-scope pointers are zero-initialized, so this is
	// safe on the first call); the original leaked one CvMat per call.
	if (H) {
		cvReleaseMat(&H);
	}
	H = cvCreateMat(3,3,CV_32F);
	cvGetPerspectiveTransform( objPts,imgPts,H );

	// Raise/lower the virtual camera via the projective term, then warp.
	IplImage *birdsview_image = cvCloneImage(image);
	CV_MAT_ELEM(*H,float,2,2) += Z;
	cvWarpPerspective( 
			image,
			birdsview_image,
			H,
			CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS |CV_WARP_INVERSE_MAP  ,
			cvScalarAll(0) );
	cvShowImage(BVwindow,birdsview_image);

	// Release the per-call temporaries (all four leaked in the original).
	cvReleaseImage(&mapx);
	cvReleaseImage(&mapy);
	cvReleaseImage(&t);
	cvReleaseImage(&birdsview_image);
}