Example #1
//--------------------------------------------------------------------------------
void ofxCvImage::warpPerspective( const ofPoint& A, const ofPoint& B, const ofPoint& C, const ofPoint& D ) {
	if( !bAllocated ){
		ofLogError("ofxCvImage") << "warpPerspective(): image not allocated";
		return;		
	}
    // compute the homography for the perspective warp
    CvPoint2D32f cvsrc[4];
    CvPoint2D32f cvdst[4];
    CvMat* translate = cvCreateMat( 3,3, CV_32FC1 );
    cvSetZero( translate );

    cvdst[0].x = 0;
    cvdst[0].y = 0;
    cvdst[1].x = width;
    cvdst[1].y = 0;
    cvdst[2].x = width;
    cvdst[2].y = height;
    cvdst[3].x = 0;
    cvdst[3].y = height;

    cvsrc[0].x = A.x;
    cvsrc[0].y = A.y;
    cvsrc[1].x = B.x;
    cvsrc[1].y = B.y;
    cvsrc[2].x = C.x;
    cvsrc[2].y = C.y;
    cvsrc[3].x = D.x;
    cvsrc[3].y = D.y;

    cvGetPerspectiveTransform( cvsrc, cvdst, translate );  // calculate homography
    cvWarpPerspective( cvImage, cvImageTemp, translate );
    swapTemp();
    flagImageChanged();
    cvReleaseMat( &translate );
}
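For reference, the 3x3 matrix produced by cvGetPerspectiveTransform maps homogeneous coordinates, so applying it to a single point requires a perspective divide. A minimal hedged sketch (not part of the ofxCv source), assuming H is a CV_32FC1 CvMat* as allocated above:

// Sketch: apply the homography H to the point (x, y), dividing by w.
static CvPoint2D32f applyHomography( const CvMat* H, float x, float y ) {
    float px = CV_MAT_ELEM(*H, float, 0, 0)*x + CV_MAT_ELEM(*H, float, 0, 1)*y + CV_MAT_ELEM(*H, float, 0, 2);
    float py = CV_MAT_ELEM(*H, float, 1, 0)*x + CV_MAT_ELEM(*H, float, 1, 1)*y + CV_MAT_ELEM(*H, float, 1, 2);
    float w  = CV_MAT_ELEM(*H, float, 2, 0)*x + CV_MAT_ELEM(*H, float, 2, 1)*y + CV_MAT_ELEM(*H, float, 2, 2);
    return cvPoint2D32f( px / w, py / w );   // perspective divide
}

This is the same per-point computation the paper-keyboard examples further down perform with cvMatMul and an explicit division by the homogeneous term.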
Example #2
//--------------------------------------------------------------------------------
void ofxCvImage::warpIntoMe( ofxCvImage& mom, const ofPoint src[4], const ofPoint dst[4] ){
    
	if( !bAllocated ){
		ofLogError("ofxCvImage") << "warpIntoMe(): image not allocated";
		return;		
	}
	if( !mom.bAllocated ){
		ofLogError("ofxCvImage") << "warpIntoMe(): source image not allocated";
		return;		
	}
		
	if( mom.getCvImage()->nChannels == cvImage->nChannels &&
        mom.getCvImage()->depth == cvImage->depth ) {

    	// compute the homography for the perspective warp
    	CvPoint2D32f cvsrc[4];
    	CvPoint2D32f cvdst[4];
    	CvMat* translate = cvCreateMat( 3, 3, CV_32FC1 );
    	cvSetZero( translate );
    	for (int i = 0; i < 4; i++ ) {
    		cvsrc[i].x = src[i].x;
    		cvsrc[i].y = src[i].y;
    		cvdst[i].x = dst[i].x;
    		cvdst[i].y = dst[i].y;
    	}
    	cvGetPerspectiveTransform( cvsrc, cvdst, translate );  // calculate homography
    	cvWarpPerspective( mom.getCvImage(), cvImage, translate);
        flagImageChanged();
    	cvReleaseMat( &translate );

    } else {
        ofLogError("ofxCvImage") << "warpIntoMe(): image type mismatch";
    }
}
Example #3
// Parameters: input image, output file name, source quad corners
int homography_transformation(CvMat* src, char* out_filename, CvPoint2D32f* srcQuad){
	if(src == NULL || out_filename == NULL || srcQuad == NULL)
		return -1;
	
	CvPoint2D32f dstQuad[4];                                // destination vertices
	CvMat* warp_matrix = cvCreateMat(3, 3, CV_32FC1);       // transformation matrix
	CvMat* dst = cvCloneMat(src);                           // clone of the input
	int p[3] = {CV_IMWRITE_JPEG_QUALITY, JPEG_QUALITY, 0};  // format, quality

	dstQuad[0].x = 0;          // dst top left
	dstQuad[0].y = 0;
	dstQuad[1].x = src->cols;  // dst top right
	dstQuad[1].y = 0;
	dstQuad[2].x = 0;          // dst bottom left
	dstQuad[2].y = src->rows;
	dstQuad[3].x = src->cols;  // dst bottom right
	dstQuad[3].y = src->rows;

	// Get the transformation matrix mapping srcQuad onto the full image rectangle.
	cvGetPerspectiveTransform(srcQuad, dstQuad, warp_matrix);
	// Perspective transformation. Parameters: source, destination, warp_matrix,
	// interpolation type (CV_INTER_LINEAR, CV_INTER_AREA, CV_INTER_CUBIC,
	// CV_INTER_LANCZOS4) and the border fill value (cvScalarAll(0) = black,
	// used when CV_WARP_FILL_OUTLIERS is set).
	cvWarpPerspective(src, dst, warp_matrix, CV_INTER_LINEAR, cvScalarAll(0));

	cvSaveImage(out_filename, dst, p);
	// release everything

	cvReleaseMat(&warp_matrix);
	cvReleaseMat(&dst);
	return 0;
}
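A hedged usage sketch for the function above (hypothetical file names; the corner order in the source quad must match dstQuad: top-left, top-right, bottom-left, bottom-right, and JPEG_QUALITY is assumed to be defined elsewhere in the project):

// Sketch: rectify a quadrilateral region of input.jpg into rectified.jpg.
CvPoint2D32f quad[4] = { cvPoint2D32f(12, 8),  cvPoint2D32f(630, 20),
                         cvPoint2D32f(5, 470), cvPoint2D32f(640, 460) };
CvMat* img = cvLoadImageM("input.jpg", CV_LOAD_IMAGE_COLOR);  // CvMat* variant of cvLoadImage
if( img != NULL && homography_transformation(img, (char*)"rectified.jpg", quad) == 0 )
    printf("saved rectified.jpg\n");
if( img != NULL )
    cvReleaseMat(&img);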
Example #4
int main(int argc, char** argv)
{
	IplImage* image = cvLoadImage(argv[1], CV_LOAD_IMAGE_COLOR); // cvCreateImage(cvSize(320, 240), IPL_DEPTH_8U, 3);
	cvNamedWindow("result", CV_WINDOW_AUTOSIZE);
/*
	gsl_qrng* q = gsl_qrng_alloc(gsl_qrng_halton, 2);
	for (int i = 0; i < 64; i++)
	{
		double v[2];
		gsl_qrng_get(q, v);
		int x = (int)(v[0] * 319), y = (int)(v[1] * 239);
		image->imageData[x * 3 + y * image->widthStep] = image->imageData[x * 3 + 1 + y * image->widthStep] = image->imageData[x * 3 + 2 + y * image->widthStep] = 255;
	}
	gsl_qrng_free(q);
*/
	gsl_rng_env_setup();

	const gsl_rng_type* T = gsl_rng_default;
	gsl_rng* r = gsl_rng_alloc(T);
	gsl_rng_set(r, time(NULL));

	double sigma = 0.1 * ((image->width < image->height) ? image->width : image->height);

	CvPoint2D32f src[4], dst[4], rds[4];
	src[0].x = 0; src[0].y = 0;
	src[1].x = image->width; src[1].y = 0;
	src[2].x = image->width; src[2].y = image->height;
	src[3].x = 0; src[3].y = image->height;
	dst[0].x = -image->width / 2 + gsl_ran_gaussian(r, sigma); dst[0].y = -image->height / 2 + gsl_ran_gaussian(r, sigma);
	dst[1].x = image->width / 2 + gsl_ran_gaussian(r, sigma); dst[1].y = -image->height / 2 + gsl_ran_gaussian(r, sigma);
	dst[2].x = image->width / 2 + gsl_ran_gaussian(r, sigma); dst[2].y = image->height / 2 + gsl_ran_gaussian(r, sigma);
	dst[3].x = -image->width / 2 + gsl_ran_gaussian(r, sigma); dst[3].y = image->height / 2 + gsl_ran_gaussian(r, sigma);
	double radius = gsl_ran_gaussian(r, 3.1415926 / 8.0); // rotation angle in radians ("radius" is a misnomer)
	double minx = 0, miny = 0, maxx = 0, maxy = 0;
	for (int i = 0; i < 4; i++)
	{
		rds[i].x = dst[i].x * cos(radius) - dst[i].y * sin(radius);
		rds[i].y = dst[i].x * sin(radius) + dst[i].y * cos(radius);
		if (rds[i].x < minx) minx = rds[i].x;
		if (rds[i].y < miny) miny = rds[i].y;
		if (rds[i].x > maxx) maxx = rds[i].x;
		if (rds[i].y > maxy) maxy = rds[i].y;
	}
	for (int i = 0; i < 4; i++)
	{
		rds[i].x -= minx;
		rds[i].y -= miny;
	}
	gsl_rng_free(r);
	float _m[9];
	CvMat m = cvMat(3, 3, CV_32FC1, _m);
	cvGetPerspectiveTransform(src, rds, &m);
	IplImage* transformed = cvCreateImage(cvSize(maxx - minx, maxy - miny), IPL_DEPTH_8U, 3);
	cvWarpPerspective(image, transformed, &m);
	cvShowImage("result", transformed);
	cvWaitKey(0);
	cvDestroyWindow("result");
	return 0;
}
Example #5
// A function to calculate the transformation matrix used for perspective transformation
void calculateTransformationMatrix( BoundingBox* from, BoundingBox* to, CvMat* transMat ) {
    CvPoint2D32f from_arr[] = { 
        cvPointTo32f( from->topLeft ),
        cvPointTo32f( from->topRight ),
        cvPointTo32f( from->bottomRight ),
        cvPointTo32f( from->bottomLeft )
    };
    CvPoint2D32f to_arr[] = {
        cvPointTo32f( to->topLeft ),
        cvPointTo32f( to->topRight ),
        cvPointTo32f( to->bottomRight ),
        cvPointTo32f( to->bottomLeft )
    };
    cvGetPerspectiveTransform( from_arr, to_arr, transMat );
}
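A hedged usage sketch, assuming BoundingBox declares its CvPoint members in the order topLeft, topRight, bottomRight, bottomLeft (only the member names are visible in the code above; the coordinates are hypothetical):

// Sketch: map the detected quad `from` onto an upright 640x480 quad `to`.
BoundingBox from = { cvPoint(40, 30), cvPoint(600, 45), cvPoint(590, 400), cvPoint(35, 380) };
BoundingBox to   = { cvPoint(0, 0),   cvPoint(639, 0),  cvPoint(639, 479), cvPoint(0, 479) };
CvMat* transMat = cvCreateMat(3, 3, CV_32FC1);
calculateTransformationMatrix(&from, &to, transMat);
// ... cvWarpPerspective(src, dst, transMat); ...
cvReleaseMat(&transMat);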
Example #6
void COpenCVMFCView::OnWarpPerspect()
{
	// TODO: Add your command handler code here

	CvPoint2D32f srcQuad[4], dstQuad[4];
	CvMat* warp_matrix = cvCreateMat(3,3,CV_32FC1);
	IplImage *src=0, *dst=0;

	src = cvCloneImage(workImg);
	cvFlip(src);
	dst = cvCloneImage(src);
	dst->origin = src->origin;
	cvZero(dst);

	srcQuad[0].x = 0;                         //src Top left
	srcQuad[0].y = 0;
	srcQuad[1].x = (float) src->width - 1;    //src Top right
	srcQuad[1].y = 0;
	srcQuad[2].x = 0;                         //src Bottom left
	srcQuad[2].y = (float) src->height - 1;
	srcQuad[3].x = (float) src->width - 1;    //src Bot right
	srcQuad[3].y = (float) src->height - 1;
	//- - - - - - - - - - - - - -//
	dstQuad[0].x = (float)(src->width*0.05);  //dst Top left
	dstQuad[0].y = (float)(src->height*0.33);
	dstQuad[1].x = (float)(src->width*0.9);   //dst Top right
	dstQuad[1].y = (float)(src->height*0.25);
	dstQuad[2].x = (float)(src->width*0.2);   //dst Bottom left
	dstQuad[2].y = (float)(src->height*0.7);      
	dstQuad[3].x = (float)(src->width*0.8);   //dst Bot right
	dstQuad[3].y = (float)(src->height*0.9);

	cvGetPerspectiveTransform(srcQuad,dstQuad,warp_matrix);
	cvWarpPerspective(src,dst,warp_matrix);

	cvNamedWindow( "Perspective_Warp", 1 );
	cvShowImage( "Perspective_Warp", dst );

	m_ImageType=-3;
	cvWaitKey();

	cvDestroyWindow( "Perspective_Warp" );
	cvReleaseImage(&src);
	cvReleaseImage(&dst);
	cvReleaseMat(&warp_matrix);

	m_ImageType=imageType(workImg);
}
Example #7
void perspectiveTransform() {

    processImage = windowImage;
    
    setVariables();                                                                           // set the four preset destination points
    setPoints();                                                                              // pick the source points on the image
    cvDestroyWindow("monitor");
    windowImage = cvCreateImage(cvSize(squareWindowSize,squareWindowSize), IPL_DEPTH_8U, 3);
    cvGetPerspectiveTransform(originalPoints, transPoints, transmat);                         // compute transmat
    cvWarpPerspective(processImage , windowImage , transmat);                                 // warp processImage into windowImage using transmat
    cvNamedWindow("control");
    cvMoveWindow("control", middlewindowX, middlewindowY);
    cvShowImage("control", windowImage);
    cvWaitKey();
    cvDestroyWindow("control");
}
Example #8
int main(int argc, const char * argv[]) {
    
    CvPoint2D32f srcQuad[4], dstQuad[4];
    CvMat* warp_matrix = cvCreateMat(3, 3, CV_32FC1);
    IplImage *src, *dst;
    
    if (argc == 2 && ((src = cvLoadImage(argv[1], 1)) != 0)) {
        dst = cvCloneImage( src );
        dst->origin = src->origin;
        cvZero( dst );
        
        // warp matrix
        srcQuad[0].x = 0;
        srcQuad[0].y = 0;
        srcQuad[1].x = src->width - 1;
        srcQuad[1].y = 0;
        srcQuad[2].x = 0;
        srcQuad[2].y = src->height - 1;
        srcQuad[3].x = src->width - 1;
        srcQuad[3].y = src->height - 1;
        
        dstQuad[0].x = src->width * 0.005;
        dstQuad[0].y = src->height * 0.33;
        dstQuad[1].x = src->width * 0.9;
        dstQuad[1].y = src->height * 0.25;
        dstQuad[2].x = src->width * 0.2;
        dstQuad[2].y = src->height * 0.7;
        dstQuad[3].x = src->width * 0.8;
        dstQuad[3].y = src->height * 0.9;
        
        cvGetPerspectiveTransform( srcQuad, dstQuad, warp_matrix );
        cvWarpPerspective( src, dst, warp_matrix );
        
        
        cvNamedWindow( "Perspective_Transform", 1 );
        cvShowImage( "Perspective_Transform" , dst );
        
        cvWaitKey();
        
        cvReleaseImage( &dst );

    }
    
    cvReleaseMat( &warp_matrix );
    
    return 0;
}
Example #9
IplImage *square_puzzle(IplImage *in, const CvPoint2D32f *location) {
    int xsize = location[1].x - location[0].x;
    int ysize = xsize;

    CvPoint2D32f warped_coordinates[4];
    warped_coordinates[0] = cvPointTo32f(cvPoint(0,       0));
    warped_coordinates[1] = cvPointTo32f(cvPoint(xsize-1, 0));
    warped_coordinates[2] = cvPointTo32f(cvPoint(xsize-1, ysize-1));
    warped_coordinates[3] = cvPointTo32f(cvPoint(0,       ysize-1));

    CvMat *map_matrix = cvCreateMat(3, 3, CV_64FC1);
    cvGetPerspectiveTransform(location, warped_coordinates, map_matrix);

    IplImage *warped_image = cvCreateImage(cvSize(xsize, ysize), 8, in->nChannels);
    CvScalar fillval=cvScalarAll(0);
    cvWarpPerspective(in, warped_image, map_matrix, CV_WARP_FILL_OUTLIERS, fillval);

    return warped_image;
}
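A hedged call-site sketch for square_puzzle(); note that xsize is derived from location[0] and location[1], so the corners are assumed ordered top-left, top-right, bottom-right, bottom-left (coordinates below are hypothetical):

// Sketch: cut a roughly square puzzle region out of an image.
CvPoint2D32f location[4] = { cvPoint2D32f(100, 80),  cvPoint2D32f(420, 90),
                             cvPoint2D32f(430, 400), cvPoint2D32f(95, 410) };
IplImage* in = cvLoadImage("puzzle.jpg", CV_LOAD_IMAGE_COLOR);
IplImage* square = square_puzzle(in, location);
// ... use `square` ...
cvReleaseImage(&square);
cvReleaseImage(&in);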
Example #10
void opencv_birds_eye_remap(ProbabilisticMapParams map, const IplImage *src_image, IplImage *dst_image, double camera_height, double camera_pitch, stereo_util stereo_util_instance)
{
  // coordinates of 4 quadrangle vertices in the source image.
  CvPoint2D32f src_pts[4] = { cvPoint2D32f(180, 400), cvPoint2D32f(180, 320), cvPoint2D32f(520, 320), cvPoint2D32f(520, 400) };

  // coordinates of the 4 corresponding quadrangle vertices in the destination image.
  CvPoint2D32f dst_pts[4];

  for (int i = 0; i < 4; i++)
  {
    carmen_position_t right_point;
    right_point.x = src_pts[i].x;
    right_point.y = src_pts[i].y;

    carmen_vector_3D_t p3D = camera_to_world(right_point, camera_height, camera_pitch, stereo_util_instance);

    int x_map = map_grid_x(map, p3D.x);
    int y_map = map_grid_y(map, p3D.y);
    dst_pts[i] = cvPoint2D32f(x_map, y_map);
  }

  // FIND THE HOMOGRAPHY
  static CvMat *homography_matrix;
  if (homography_matrix == NULL)
    homography_matrix = cvCreateMat(3, 3, CV_32F);
  cvGetPerspectiveTransform(dst_pts, src_pts, homography_matrix);

  float Z = camera_height;

  CV_MAT_ELEM(*homography_matrix, float, 2, 2) = Z;
  cvWarpPerspective(src_image,
      dst_image,
      homography_matrix,
      CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS,
      cvScalarAll(0));
}
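Note that the homography above is computed from dst_pts to src_pts, which is why the warp passes CV_WARP_INVERSE_MAP. Ignoring the Z rescaling of the (2,2) element, a hedged equivalent is to compute the forward mapping and drop the flag:

// Sketch: forward formulation of the same warp (without the Z adjustment).
CvMat* forward = cvCreateMat(3, 3, CV_32F);
cvGetPerspectiveTransform(src_pts, dst_pts, forward);   // src -> dst this time
cvWarpPerspective(src_image, dst_image, forward,
                  CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
cvReleaseMat(&forward);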
Example #11
IplImage* cutSign(IplImage* origImg, CvPoint* corners, int numcorners, bool drawcircles)
{
	// convert corners to CvPoint2D32f.
	CvPoint2D32f cornersf[numcorners];
	for (int i=0; i<numcorners; ++i)
		cornersf[i] = cvPointTo32f(corners[i]);

	if (_debug) printf("Corners: %d,%d %d,%d %d,%d %d,%d\n",corners[0].x,corners[0].y,corners[1].x,corners[1].y,corners[2].x,corners[2].y,corners[3].x,corners[3].y);

	// Create target image with the right size.
	double xDiffBottom = pointDist(corners[0], corners[1]);
	double yDiffLeft = pointDist(corners[0], corners[3]);
	IplImage* cut = cvCreateImage(cvSize(xDiffBottom,yDiffLeft), IPL_DEPTH_8U, 3);

	// target points for perspective correction.
	CvPoint2D32f cornerstarget[numcorners];
	cornerstarget[0] = cvPoint2D32f(0,0);
	cornerstarget[1] = cvPoint2D32f(cut->width-1,0);
	cornerstarget[2] = cvPoint2D32f(cut->width-1,cut->height-1);
	cornerstarget[3] = cvPoint2D32f(0,cut->height-1);
	if (_debug) printf("Corners: %f,%f %f,%f %f,%f %f,%f\n",cornerstarget[0].x,cornerstarget[0].y,cornerstarget[1].x,cornerstarget[1].y,cornerstarget[2].x,cornerstarget[2].y,cornerstarget[3].x,cornerstarget[3].y);

	// Apply perspective correction to the image.
	CvMat* transmat = cvCreateMat(3, 3, CV_32FC1); // 3 rows, 3 columns
	transmat = cvGetPerspectiveTransform(cornersf, cornerstarget, transmat);
	cvWarpPerspective(origImg, cut, transmat);
	cvReleaseMat(&transmat);

	// Draw yellow circles around the corners.
	if (drawcircles)
		for (int i=0; i<numcorners; ++i)
			cvCircle(origImg, corners[i], 5, CV_RGB(255,255,0), 2);

	return cut;
}
Example #12
int main(int argc, char *argv[]) {
	if (argc < 3){
		printf("Usage: %s <image-file-name1> <image-file-name2>\n", argv[0]);
		exit(1);
	}

	IplImage* img1 = cvLoadImage(argv[1], CV_LOAD_IMAGE_GRAYSCALE);
	if (!img1) {
		printf("Could not load image file: %s\n", argv[1]);
		exit(1);
	}
	IplImage* img1f = cvCreateImage(cvGetSize(img1), IPL_DEPTH_32F, 1);
	cvConvertScale(img1, img1f, 1.0 / 255.0);

	IplImage* img2 = cvLoadImage(argv[2], CV_LOAD_IMAGE_GRAYSCALE);
	if (!img2) {
		printf("Could not load image file: %s\n", argv[2]);
		exit(1);
	}
	IplImage* img2f = cvCreateImage(cvGetSize(img1), IPL_DEPTH_32F, 1);
	cvConvertScale(img2, img2f, 1.0 / 255.0);

	/**
	 * Task: Homographies (5 points)
	 * 
	 * Assuming images are taken with a distortion-free pinhole camera,
	 * views with different image planes but the same projection center
	 * are related by projective mappings, so-called homographies.
	 * 
	 * - Write down a translation as a homography (on paper!).
	 * - Shift the image plane of a test image 20 pixels to the right without
	 *   changing the projection center. Use \code{cvWarpPerspective} for this.
	 * - How many point correspondences are needed at minimum to determine a
	 *   projective mapping between two images uniquely up to scale? Why?
	 *   (Answer in writing!)
	 */

/* TODO */
	IplImage* img_moved = cvCreateImage(cvGetSize(img1), IPL_DEPTH_32F, 1);
	cv::Mat matImg1f_task1 = cv::Mat(img1f);
	cv::Mat matImgMoved = cv::Mat(img_moved);

	float data[] = { 1, 0, -20, 0, 1, 0, 0, 0, 1 };
	cv::Mat trans(3, 3, CV_32FC1, data);
	cv::warpPerspective(matImg1f_task1, matImgMoved, trans, matImgMoved.size());
	cv::namedWindow("mainWin", CV_WINDOW_AUTOSIZE);
	cv::Mat img_moved_final(img_moved);
	cv::imshow("mainWin", img_moved_final);
	cvWaitKey(0);
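	/* Added note (a sketch answering the questions above): a translation by
	 * (tx, ty) written as a homography is
	 *         [ 1  0  tx ]
	 *     H = [ 0  1  ty ] ,   (x', y', w)^T = H (x, y, 1)^T, then divide by w.
	 *         [ 0  0  1  ]
	 * With tx = -20, as in `data` above, the content moves 20 px to the left,
	 * i.e. the image plane moves 20 px to the right. A homography has 8
	 * degrees of freedom (9 entries, defined up to scale) and each point
	 * correspondence contributes 2 equations, so at least 4 correspondences
	 * (no 3 of them collinear) are needed to determine it up to scale. */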

	/**
	 * Task: Panorama (15 points)
	 *
	 * The goal of this task is to construct a panorama from two given images.
	 * \begin{center}
	 * \includegraphics[width = 0.3\linewidth]{left.png}
	 * \includegraphics[width = 0.3\linewidth]{right.png}
	 * \end{center}
	 * 
	 * First, a perspective transformation must be determined from the given
	 * point correspondences
	 * \begin{center}
	 * \begin{tabular}{|c|c|}
	 * \hline
	 * left image & right image \\
	 * $(x, y)$ & $(x, y)$ \\ \hline \hline
	 * (463, 164) & (225, 179)\\ \hline
	 * (530, 357) & (294, 370)\\ \hline
	 * (618, 357) & (379, 367)\\ \hline
	 * (610, 153) & (369, 168)\\ \hline
	 * \end{tabular}
	 * \end{center}
	 * with which the images can be transformed onto a common image plane.
	 * 
	 * - Compute the transformation from the given point correspondences.
	 *   Use the function \code{cvGetPerspectiveTransform}. What is the
	 *   central idea of the DLT algorithm as presented in the lecture?
	*/

/* TODO */
	CvMat *P = cvCreateMat(3, 3, CV_32FC1);
	CvPoint points1[] = { cvPoint(463, 164), cvPoint(530, 357), cvPoint(618, 357), cvPoint(610, 153) };
	CvPoint points2[] = { cvPoint(225, 179), cvPoint(294, 370), cvPoint(379, 367), cvPoint(369, 168) };
	CvPoint2D32f pt1[4], pt2[4];
	for (int i = 0; i < 4; ++i) {
		pt2[i].x = points2[i].x;
		pt2[i].y = points2[i].y;
		pt1[i].x = points1[i].x;
		pt1[i].y = points1[i].y;
	}
	cvGetPerspectiveTransform(pt1, pt2, P);
	
	/**
	 * - Determine the required image size for the panorama.
	 */

/* TODO */
	int h = img1f->height - 1;
	int w = img1f->width - 1;
	float p1[] = { 0.0, 0.0, 1.0 };
	float p2[] = { 0.0, (float)(h), 1.0 };
	float p3[] = { (float)(w), (float)(h), 1.0 };
	float p4[] = { (float)(w), 0.0, 1.0 };

	
	cv::Mat P1 = cv::Mat(P) * cv::Mat(3, 1, CV_32FC1, p1);
	cv::Mat P2 = cv::Mat(P) * cv::Mat(3, 1, CV_32FC1, p2);
	cv::Mat P3 = cv::Mat(P) * cv::Mat(3, 1, CV_32FC1, p3);
	cv::Mat P4 = cv::Mat(P) * cv::Mat(3, 1, CV_32FC1, p4);

	// mustn't be zero
	assert(P1.at<float>(2,0) != 0 && P2.at<float>(2,0) != 0 && P3.at<float>(2,0) != 0 && P4.at<float>(2,0) != 0);

	P1 = P1 / P1.at<float>(2,0);
	P2 = P2 / P2.at<float>(2,0);
	P3 = P3 / P3.at<float>(2,0);
	P4 = P4 / P4.at<float>(2,0);



	/**
	 * - Project the left image into the image plane of the right image. Make
	 *   sure that the left image border is also projected into the panorama.
	 */

/* TODO */
	///////// There is probably a bug with the size somewhere here...
	std::vector<cv::Mat*> matrices;
	matrices.push_back(&P1);
	matrices.push_back(&P2);
	matrices.push_back(&P3);
	matrices.push_back(&P4);
	cv::Point minP(P1.at<float>(0,0), P1.at<float>(1,0)), maxP(P1.at<float>(0,0), P1.at<float>(1,0));
	for(size_t i = 0; i < matrices.size(); ++i) {
			minP.x = (int)(min(matrices[i]->at<float>(0,0), (float)minP.x));
			minP.y = (int)(min(matrices[i]->at<float>(1,0), (float)minP.y));

			maxP.x = (int)(max(matrices[i]->at<float>(0,0), (float)maxP.x)+1.0);
			maxP.y = (int)(max(matrices[i]->at<float>(1,0), (float)maxP.y)+1.0);
	}

	minP.x = min(minP.x, 0); minP.y = min(minP.y, 0);
	maxP.x = max(maxP.x, img1f->width-1); maxP.y = max(maxP.y, img1f->height-1);
	// create image
	cv::Mat Panorama = cv::Mat(cv::Size(maxP-minP),  CV_32FC1, cv::Scalar(0.0));
	cv::Mat PLeft = cv::Mat(cv::Size(maxP-minP),  CV_32FC1, cv::Scalar(0.0));
	cv::Mat PRight = cv::Mat(cv::Size(maxP-minP),  CV_32FC1, cv::Scalar(0.0));

	cv::Mat matImg1f = cv::Mat( img1f);
	cv::Mat matImg2f = cv::Mat( img2f);
	for(int y=0; y < matImg1f.rows; ++y ) {
		for(int x=0; x < matImg1f.cols; ++x ) {
			PLeft.at<float>(y,x) = matImg1f.at<float>(y,x);
		}
	}
	for(int y=0; y < matImg2f.rows; ++y ) {
		for(int x=0; x < matImg2f.cols; ++x ) {
			PRight.at<float>(y,x) = matImg2f.at<float>(y,x);
		}
	}

	
	cv::imshow("mainWin", PLeft);
	cv::waitKey(0);
	cv::imshow("mainWin", PRight);
	cv::waitKey(0);

	float trans2[] = { 1.0, 0.0, -minP.x, 0.0, 1.0, -minP.y, 0.0, 0.0, 1.0};
	cv::Mat translation(3,3,CV_32FC1,trans2);
	//translate P
	cv::Mat Pnew = translation*cv::Mat(P);
	cv::warpPerspective(PLeft, Panorama, Pnew, Panorama.size());
	cv::warpPerspective(PRight, PLeft, translation, PLeft.size());
	PRight = PLeft.clone();

	cv::imshow("mainWin", PLeft);
	cv::waitKey(0);
	cv::imshow("mainWin", Panorama);
	cv::waitKey(0);
	/**
	 * - Build the panorama so that pixels for which two values are available
	 *   are assigned the mean of the two.
	 */

	cv::Mat mask = (Panorama > 0.0) & (PLeft > 0.0);
	cv::imshow("mainWin", mask);
	cv::waitKey(0);

	mask.convertTo(mask,CV_32FC1, 0.5/255.); 
	cv::Mat weighted = cv::Mat(Panorama.size(),  CV_32FC1, cv::Scalar(1.0)) - mask;

	Panorama = Panorama + PLeft;
	cv::multiply(Panorama, weighted, Panorama);

	cv::imshow("mainWin", Panorama);
	cv::waitKey(0);
/* TODO */

	/**
	 * - Display the panorama image.
	 */

/* TODO */

	return 0;
}
Example #13
void bird_eye() {
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CvMat *intrinsic = (CvMat*) cvLoad("Intrinsics.xml");
	CvMat *distortion = (CvMat*) cvLoad("Distortion.xml");

	IplImage* image = cvLoadImage("./Resource/bird-eye.jpg", 1);
	IplImage* gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	cvCvtColor(image, gray_image, CV_BGR2GRAY);

	IplImage* mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage* mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	
	cvInitUndistortMap(
			intrinsic, 
			distortion, 
			mapx, 
			mapy
	);
	
	IplImage* t = cvCloneImage(image);

	cvRemap(t, image, mapx, mapy);
	
	cvNamedWindow("Chessboard");
	cvShowImage("Chessboard", image);
	int c = cvWaitKey(-1);
	CvPoint2D32f* corners = new CvPoint2D32f[board_n];
	int corner_count = 0;
	
	int found = cvFindChessboardCorners(
			image, 
			board_sz, 
			corners, 
			&corner_count, 
			CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS
	);
	
	if(!found){
		printf("couldn't aquire chessboard!\n");
		return;
	}
	
	cvFindCornerSubPix(
			gray_image, 
			corners, 
			corner_count, 
			cvSize(11, 11), 
			cvSize(-1, -1), 
			cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 30, 0.1)
	);

	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;			objPts[0].y = 0;
	objPts[1].x = board_w - 1;	objPts[1].y = 0;
	objPts[2].x = 0;			objPts[2].y = board_h - 1;
	objPts[3].x = board_w - 1;	objPts[3].y = board_h - 1;
	imgPts[0]   = corners[0];
	imgPts[1]	= corners[board_w - 1];
	imgPts[2]	= corners[(board_h - 1) * board_w];
	imgPts[3]	= corners[(board_h - 1) * board_w + board_w - 1];

	cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
	cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

	cvDrawChessboardCorners(
		image,
		board_sz,
		corners,
		corner_count,
		found
	);

	cvShowImage("Chessboard", image);

	CvMat *H = cvCreateMat(3, 3, CV_32F);
	cvGetPerspectiveTransform(objPts, imgPts, H);

	float z = 25;
	int key = 0;
	IplImage * birds_image = cvCloneImage(image);
	cvNamedWindow("Birds_Eye");

	while(key != 27) {
		CV_MAT_ELEM(*H, float, 2, 2) = z;

		cvWarpPerspective(
			image,
			birds_image,
			H,
			CV_INTER_LINEAR| CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS
		);

		cvShowImage("Birds_Eye", birds_image);

		key = cvWaitKey();
		if(key == 'u') z += 0.5;
		if(key == 'd') z -= 0.5;
	}

	cvSave("H.xml", H);
}
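The interactive loop above changes only H[2][2] before each warp: rescaling the homogeneous term of the ground-to-image homography acts like changing the virtual camera height, so larger z zooms the bird's-eye view out. A hedged helper factoring out one frame of that loop:

// Sketch: render one bird's-eye frame of `image` at "height" z.
static void show_birds_eye(IplImage* image, IplImage* birds_image, CvMat* H, float z)
{
	CV_MAT_ELEM(*H, float, 2, 2) = z;   // rescale the homogeneous term
	cvWarpPerspective(image, birds_image, H,
			CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
	cvShowImage("Birds_Eye", birds_image);
}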
Example #14
// TODO: revise later
void FkPaperKeyboard_TypeA::cornerVerification(IplImage* srcImage){
	CvSize size = cvGetSize(srcImage);
	IplImage* eigImage = cvCreateImage(size, IPL_DEPTH_8U,1);
	IplImage* tempImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* grayImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* veriImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* dstImage = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* mask = cvCreateImage(size, IPL_DEPTH_8U, 1);
	IplImage* mask2 = cvCreateImage(size, IPL_DEPTH_8U, 1);
	CvRect rect = cvRect(10, 10, 640 - 20, 480 - 20);

	CvPoint2D32f srcQuad[4], dstQuad[4];
	CvMat* warp_matrix = cvCreateMat(3,3, CV_32FC1);
	CvMat* warp_matrix_invert = cvCreateMat(3,3, CV_32FC1);
	CvMat* result = cvCreateMat(3, 1, CV_32FC1);
	CvMat* dst = cvCreateMat(3, 1,CV_32FC1);

	int keyButtonCornerCount = 316;
	
	cvCvtColor(srcImage, grayImage, CV_BGR2GRAY);
	cvSetImageROI(grayImage, rect);
	cvSetImageROI(mask, rect);
	cvSetImageROI(dstImage, rect);
	cvSetImageROI(mask2, rect);

	// extract only values in the 100-255 range and store them in the mask
	cvInRangeS(grayImage, cvScalar(100, 100, 100), cvScalar(255, 255, 255), mask);
	cvCopy(mask, mask2);

	//cvShowImage("mask", mask);
	//cvShowImage("mask2", mask2);

	// XOR the flood-filled mask with the extracted values (mask2) to drop the background region below the threshold
	cvFloodFill(mask, cvPoint(10, 10), cvScalar(0, 0, 0));
	cvXor(mask2, mask, dstImage);
	
	//cvShowImage("mask3", mask);
	//cvShowImage("mask4", mask2);
	//cvShowImage("dstImage", dstImage);

	// extract corners from the final image (the corners of each key button)
	cvGoodFeaturesToTrack(dstImage, eigImage, tempImage, keyButtonCorner, &keyButtonCornerCount, 0.01, 7, NULL, 7, 0);
	cvFindCornerSubPix (dstImage, keyButtonCorner, keyButtonCornerCount,cvSize (3, 3), cvSize (-1, -1), cvTermCriteria (CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));
	
	cvResetImageROI(dstImage);
	for(int i =0 ; i < 316 ; i++){
		keyButtonCorner[i].x += rect.x;
		keyButtonCorner[i].y += rect.y;
	}
	
	initKeyButtonCorner();
	
	srcQuad[CLOCKWISE_1].x = keyButtonCorner[315].x+10;
	srcQuad[CLOCKWISE_1].y = keyButtonCorner[315].y-10;
	srcQuad[CLOCKWISE_5].x = keyButtonCorner[31].x + 10;
	srcQuad[CLOCKWISE_5].y = keyButtonCorner[31].y + 10;
	srcQuad[CLOCKWISE_7].x = keyButtonCorner[0].x - 10;
	srcQuad[CLOCKWISE_7].y = keyButtonCorner[0].y + 10;
	srcQuad[CLOCKWISE_11].x = keyButtonCorner[290].x - 10;
	srcQuad[CLOCKWISE_11].y = keyButtonCorner[290].y - 10;
	dstQuad[CLOCKWISE_1].x = 640;
	dstQuad[CLOCKWISE_1].y = 0;
	dstQuad[CLOCKWISE_5].x = 640;
	dstQuad[CLOCKWISE_5].y = 480;
	dstQuad[CLOCKWISE_7].x = 0;
	dstQuad[CLOCKWISE_7].y = 480;
	dstQuad[CLOCKWISE_11].x = 0;
	dstQuad[CLOCKWISE_11].y = 0;
	cvGetPerspectiveTransform(srcQuad, dstQuad, warp_matrix);
	
	cvWarpPerspective(dstImage, veriImage, warp_matrix);
	detectKeyButtonCorner(veriImage);
	cvInvert(warp_matrix, warp_matrix_invert);
	for(int i = 0 ; i < 316 ; i++){	
		cvmSet(dst, 0, 0, keyButtonCorner[i].x);  
		cvmSet(dst, 1, 0, keyButtonCorner[i].y);
		cvmSet(dst, 2, 0, 1);

		cvMatMul(warp_matrix_invert, dst, result);
		float t = cvmGet(result, 2,0);
		keyButtonCorner[i].x = cvmGet(result, 0,0)/t ;
		keyButtonCorner[i].y = cvmGet(result, 1,0)/t ;
	}
	cvResetImageROI(srcImage);
	cvResetImageROI(mask);
	cvReleaseImage(&eigImage);
	cvReleaseImage(&tempImage);
	cvReleaseImage(&grayImage);
	cvReleaseImage(&veriImage);
	cvReleaseImage(&dstImage);
	cvReleaseImage(&mask);
	cvReleaseImage(&mask2);
	cvReleaseMat(&warp_matrix);
	cvReleaseMat(&warp_matrix_invert);
	cvReleaseMat(&result);
	cvReleaseMat(&dst);	
}
Example #15
void FkPaperKeyboard_TypeA::setKeyButton(IplImage* srcImage){
	// corner points
	CvPoint2D32f srcQuad[4], dstQuad[4];

	IplImage* perspectiveTransImage = cvCreateImage(cvSize(640,480), IPL_DEPTH_8U, 3);

	CvMat* warp_matrix = cvCreateMat(3,3, CV_32FC1);
	CvMat* warp_matrix_invert = cvCreateMat(3,3, CV_32FC1);
	CvMat* result = cvCreateMat(3, 1, CV_32FC1);
	CvMat* dst = cvCreateMat(3, 1,CV_32FC1);

	sortPaperKeyboardCorner();
	
	srcQuad[CLOCKWISE_1] = keyboardCorner[1];
	srcQuad[CLOCKWISE_5] = keyboardCorner[2];
	srcQuad[CLOCKWISE_7] = keyboardCorner[3];
	srcQuad[CLOCKWISE_11] = keyboardCorner[0];

	dstQuad[CLOCKWISE_1].x = 640;
	dstQuad[CLOCKWISE_1].y = 0;
	dstQuad[CLOCKWISE_5].x = 640;
	dstQuad[CLOCKWISE_5].y = 480;
	dstQuad[CLOCKWISE_7].x = 0;
	dstQuad[CLOCKWISE_7].y = 480;
	dstQuad[CLOCKWISE_11].x = 0;
	dstQuad[CLOCKWISE_11].y = 0;	

	// Verify after applying the perspective transform.
	cvGetPerspectiveTransform(srcQuad, dstQuad, warp_matrix); // compute the transform
	cvWarpPerspective(srcImage, perspectiveTransImage, warp_matrix);	// apply the perspective warp
	cornerVerification(perspectiveTransImage);	// verify the extracted corners
	cvInvert(warp_matrix, warp_matrix_invert);	// invert to map points back to the source image
	
	//cvShowImage("srcImage",srcImage);
	//cvShowImage("perspectiveTransImage", perspectiveTransImage);

	for(int i = 0 ; i < 316 ; i++){
		cvmSet(dst, 0, 0, keyButtonCorner[i].x);  
		cvmSet(dst, 1, 0, keyButtonCorner[i].y);
		cvmSet(dst, 2, 0, 1);

		cvMatMul(warp_matrix_invert, dst, result);
		float t = cvmGet(result, 2,0);
		keyButtonCorner[i].x = cvmGet(result, 0,0)/t;
		keyButtonCorner[i].y = cvmGet(result, 1,0)/t;
	}
	
	setKeyButtonArea(keyButtonCorner, 0, 16);
	setKeyButtonArea(keyButtonCorner, 64, 14);
	setKeyButtonArea(keyButtonCorner, 120, 14);
	setKeyButtonArea(keyButtonCorner, 176, 13);
	setKeyButtonArea(keyButtonCorner, 228, 12);
	setKeyButtonArea(keyButtonCorner, 276, 7);
	setDirectionKeyButtonArea(keyButtonCorner, 306, 2, 77);
	setDirectionKeyButtonArea(keyButtonCorner, 304, 4, 76);
	setDirectionKeyButtonArea(keyButtonCorner, 308, 3, 78);
	
	cvReleaseImage(&perspectiveTransImage);

	cvReleaseMat(&warp_matrix);
	cvReleaseMat(&warp_matrix_invert);
	cvReleaseMat(&result);
	cvReleaseMat(&dst);
}
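Both methods above back-project the 316 button corners by multiplying each point with the inverted warp matrix and dividing by the homogeneous term t. A hedged equivalent, assuming keyButtonCorner is a CvPoint2D32f array, handles all points in one call:

// Sketch: replace the per-point cvMatMul loop with cvPerspectiveTransform.
CvPoint2D32f mapped[316];
CvMat srcm = cvMat(1, 316, CV_32FC2, keyButtonCorner);
CvMat dstm = cvMat(1, 316, CV_32FC2, mapped);
cvPerspectiveTransform(&srcm, &dstm, warp_matrix_invert);  // includes the divide by w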
Example #16
void the_project::project_getting()
{
	cout << "Ready to go?\n";
	cin.get();
	get_in = cvCreateImage(image_size, IPL_DEPTH_8U, 3);

	if(v_c_flag==0){
	// grab an image of the path from the camera
	///*
		IplImage *new_one = NULL;
		cout << "Adjust the camera position to capture an image. Space to continue:\n";
		cvNamedWindow("Adjust");
		while(1){
			new_one = cvQueryFrame(for_cam);
			cvShowImage("Adjust",new_one);
			if(cvWaitKey(10)==' '){
				cvCopyImage(new_one,get_in);
				cvDestroyWindow("Adjust");
				cvReleaseCapture(&for_cam);
				break;
			}
		}
		cvSaveImage("image_origin.jpg",get_in);
	//*/
	}
	else if(v_c_flag==1){
	// for test: take a frame from the video instead
		get_in = cvQueryFrame(for_video);
	}

	cvNamedWindow("win1");
	cvShowImage("win1",get_in);
	// transform
	get_change = cvCreateImage(image_size, IPL_DEPTH_8U, 3);
	transmat = cvCreateMat(3, 3, CV_32FC1);

	vector<CvPoint2D32f> points;
	void * p_pointer = 
		reinterpret_cast<void *>(&points);

	cout << "choose four points for transform.\n";
	cvSetMouseCallback("win1", mouse,p_pointer);
	cvWaitKey();

	cvDestroyWindow("win1");
	for(int i=0;i<4;i++)
		originpoints[i] = points[i];

	cvGetPerspectiveTransform(originpoints, newpoints, transmat);
	cvWarpPerspective(get_in, get_change, transmat);

	cvNamedWindow("win1");
	cvShowImage("win1",get_in);
	cvNamedWindow("win2");
	cvShowImage("win2",get_change);

	cvSaveImage("image_square.jpg",get_change);

	get_unchange = cvCreateImage(image_size, IPL_DEPTH_8U, 3);
	get_looking = cvCreateImage(image_size, IPL_DEPTH_8U, 3);
	cvCopyImage(get_change,get_unchange);

	cout << "Press any key to continue...\n";
	cvWaitKey();
}
Example #17
void BirdsIview(CvMat* intrinsic,CvMat* distortion,IplImage* image,CvPoint2D32f* corners){

	//This initializes rectification matrices  	
	IplImage* mapx = cvCreateImage( cvGetSize(image), IPL_DEPTH_32F, 1 );
	IplImage* mapy = cvCreateImage( cvGetSize(image), IPL_DEPTH_32F, 1 );
	// UNDISTORT OUR IMAGE
	cvInitUndistortMap(intrinsic,distortion,mapx,mapy);
	IplImage *t = cvCloneImage(image);
	// Rectify our image  
	cvRemap(t, image, mapx,mapy,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS,cvScalarAll(0));

	
	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0; objPts[0].y = 0;
	objPts[1].x = board_w-1; objPts[1].y = 0;
	objPts[2].x = 0; objPts[2].y = board_h-1;
	objPts[3].x = board_w-1; objPts[3].y = board_h-1;

	//arrange points in Z orientation 
	if ( abs(corners[0].x - corners[board_w-1].x) > abs(corners[0].y - corners[board_w-1].y)  ){
		if ((corners[board_w-1].x-corners[0].x) > 0){
			imgPts[0] = corners[0];
			imgPts[1] = corners[board_w-1];
			imgPts[2] = corners[(board_h-1)*board_w];
			imgPts[3] = corners[(board_h-1)*board_w + board_w-1];
		}
		if ((corners[board_w-1].x-corners[0].x) < 0){
			imgPts[3] = corners[0];
			imgPts[2] = corners[board_w-1];
			imgPts[1] = corners[(board_h-1)*board_w];
			imgPts[0] = corners[(board_h-1)*board_w + board_w-1];
		}
	}

	if ( abs(corners[0].x - corners[board_w-1].x) < abs(corners[0].y - corners[board_w-1].y)  ){
		if ((corners[board_w-1].y-corners[0].y) > 0){		
			imgPts[1] = corners[0];
			imgPts[3] = corners[board_w-1];
			imgPts[0] = corners[(board_h-1)*board_w];
			imgPts[2] = corners[(board_h-1)*board_w + board_w-1];
		}
		if ((corners[board_w-1].y-corners[0].y) < 0){
			imgPts[2] = corners[0]; //2
			imgPts[0] = corners[board_w-1]; //0
			imgPts[3] = corners[(board_h-1)*board_w];
			imgPts[1] = corners[(board_h-1)*board_w + board_w-1]; //1
		}
	}


	// DRAW THE POINTS in order: B,G,R,YELLOW

	cvCircle(image,cvPointFrom32f(imgPts[0]),9,CV_RGB(0,0,255),3,8,0);
	cvCircle(image,cvPointFrom32f(imgPts[1]),9,CV_RGB(0,255,0),3,8,0);
	cvCircle(image,cvPointFrom32f(imgPts[2]),9,CV_RGB(255,0,0),3,8,0);
	cvCircle(image,cvPointFrom32f(imgPts[3]),9,CV_RGB(255,255,0),3,8,0);
	
	//Find the Homography
	H = cvCreateMat(3,3,CV_32F);
	cvGetPerspectiveTransform( objPts,imgPts,H );

	IplImage *birdsview_image = cvCloneImage(image);
	CV_MAT_ELEM(*H,float,2,2) += Z;
	cvWarpPerspective( 
			image,
			birdsview_image,
			H,
			CV_INTER_LINEAR | CV_WARP_FILL_OUTLIERS |CV_WARP_INVERSE_MAP  ,
			cvScalarAll(0) );
	cvShowImage(BVwindow,birdsview_image);
		
}
Example #18
int match_X(IplImage* binary, CvPoint* points, int pixel_count, CvPoint* r_point, int cent_x, int cent_y, IplImage* debug){

    int i,x,y; //useful variable names
    CvPoint* corners; //the corners of the image
    
    corners = (CvPoint*)calloc(4, sizeof(CvPoint)); 
    corners[0].x = r_point->x;
    corners[0].y = r_point->y;

    //find the angle of the first corner
    x = corners[0].x - cent_x;
    y = corners[0].y - cent_y;
    int theta = arctan(y,x);

    //find the other 3 corners
    int maxr1 = 0;
    int maxr2 = 0;
    int maxr3 = 0;
    //keep track of the pixel sums on either side of angle0 
    int pxsum1 = 0; //this is bigger --> corner 0 goes in upper left
    int pxsum2 = 0; // this is bigger --> corner 0 goes in upper right
    for(i = pixel_count -1; i>=0; i--){
        x = points[i].x - cent_x;
        y = points[i].y - cent_y;
        int tempang = arctan(y,x);

        //normalize angles
        if (tempang < theta) tempang += 360;
        
        //find tempr
        int tempr = (int)sqrt((double)(x*x + y*y));

        if( tempang - theta > 315 ){
            //we are in the same quadrant as the original angle
            pxsum1++;
        } else if( tempang - theta > 225){
            //call this quadrant 3
            if( tempr > maxr3){
                maxr3 = tempr;
                corners[3].x = points[i].x;
                corners[3].y = points[i].y;
            }
       } else if( tempang - theta > 135) {
            //call this quadrant 2
            if( tempr > maxr2) {
                maxr2 = tempr;
                corners[2].x = points[i].x;
                corners[2].y = points[i].y;
            }
       } else if( tempang - theta > 45) {
            //call this quadrant 1
            if( tempr > maxr1) {
                maxr1 = tempr;
                corners[1].x = points[i].x;
                corners[1].y = points[i].y;
            }
       } else {
            //we are again in the same quadrant as the original angle
            pxsum2++;
       }

       #ifdef VISUAL_DEBUG_X
            //color every quadrant a different color
            if( tempang - theta > 315 ){
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+2]=0;
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+1]=254;
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+0]=254;
            } else if( tempang - theta > 225){
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+2]=254;
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+1]=0;
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+0]=0;
           } else if( tempang - theta > 135) {
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+2]=0;
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+1]=0;
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+0]=254;
           } else if( tempang - theta > 45) {
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+2]=254;
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+1]=0;
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+0]=254;
           } else {
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+2]=0;
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+1]=254;
                debug->imageData[3*points[i].y*debug->width+3*points[i].x+0]=0;
           }
        #endif
    }

    #ifdef VISUAL_DEBUG_X
        //draw a line between consecutive corners
        CvScalar boxcolor = {{0, 254, 0}};
        cvLine(debug, corners[0], corners[1], boxcolor, 1, 8, 0);
        cvLine(debug, corners[1], corners[2], boxcolor, 1, 8, 0);
        cvLine(debug, corners[2], corners[3], boxcolor, 1, 8, 0);
        cvLine(debug, corners[3], corners[0], boxcolor, 1, 8, 0);
        
        //draw a circle at the center of the image
        CvPoint center = {cent_x, cent_y};
        CvScalar centercolor = {{0, 0, 254}};
        cvCircle(debug, center, 5, centercolor, 2, 8, 0);

        //mark corner 0
        CvPoint corner0 = {corners[0].x, corners[0].y};
        CvScalar cornercolor = {{254, 0, 254}};
        cvCircle(debug, corner0, 5, cornercolor, 1, 8, 0);

    #endif

    //load template
    IplImage* xtemplate = cvLoadImage("../vision/xtemplate.png",CV_LOAD_IMAGE_GRAYSCALE);
    //get transformation matrix that would place corner values
        //in the correct location to compare to template

    //transform image
        //create an array for the 4 src points and dest points
        CvPoint2D32f* src;
        CvPoint2D32f* dst;
        src = (CvPoint2D32f*)calloc(4, sizeof(CvPoint2D32f));
        dst = (CvPoint2D32f*)calloc(4, sizeof(CvPoint2D32f));

        //populate the src array 
        if( pxsum1 >= pxsum2 ){
            for ( i=0; i<4; i++){
                src[i].x = corners[i].x;
                src[i].y = corners[i].y;
            }
        }else{
            for ( i=0; i<4; i++){
                int j = i-1;
                if ( j<0 ) j = 3;
                src[i].x = corners[j].x;
                src[i].y = corners[j].y;
            }
        }
        //populate the dst array
        dst[0].x = 0;
        dst[0].y = 0;
        dst[1].x = 0;
        dst[1].y = 100;
        dst[2].x = 100;
        dst[2].y = 100;
        dst[3].x = 100;
        dst[3].y = 0;

        //get the transformation matrix
        CvMat* tmatrix = cvCreateMat(3,3,CV_32FC1);
        tmatrix = cvGetPerspectiveTransform( src, dst, tmatrix);

        //transform the image
        CvSize warpedsize = {100,100};
        IplImage* warped = cvCreateImage(warpedsize, 8, 1);
        CvScalar fillcolor = {{0}};
        cvWarpPerspective(binary, warped, tmatrix,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS , fillcolor);

    #ifdef VISUAL_DEBUG_X
        IplImage* compared = cvCreateImage(cvGetSize(xtemplate),8,3);
    #endif

    //Compare (XNOR) the template image with our warped image: count agreeing pixels
    int xor_sum = 0;
    for ( i=xtemplate->width*xtemplate->height -1; i>=0; i--){
        int p1 = xtemplate->imageData[i];
        int p2 = warped->imageData[i];
        if(( p1 != 0 &&  p2 != 0 ) || ( p1 == 0 && p2 == 0 )){
            xor_sum++;
        }
       
        #ifdef VISUAL_DEBUG_X
        if(p1 != 0 && p2 != 0){
            compared->imageData[i*3+0] = 0x00;
            compared->imageData[i*3+1] = 0xff;
            compared->imageData[i*3+2] = 0x00;
        }else if(p1 == 0 && p2 == 0){
            compared->imageData[i*3+0] = 0x00;
            compared->imageData[i*3+1] = 0xff;
            compared->imageData[i*3+2] = 0x00;
        }else if(p1 != 0){
            compared->imageData[i*3+0] = 0xff;
            compared->imageData[i*3+1] = 0x00;
            compared->imageData[i*3+2] = 0x00;
        }else if(p2 != 0){
            compared->imageData[i*3+0] = 0xff;
            compared->imageData[i*3+1] = 0x00;
            compared->imageData[i*3+2] = 0x00;
        }
        #endif
    }

    //compute confidence
    int confidence = xor_sum * 100 / (xtemplate->width*xtemplate->height);

    #ifdef VISUAL_DEBUG_X
        cvNamedWindow("Compared",CV_WINDOW_AUTOSIZE);
        cvShowImage("Compared", compared);

        cvNamedWindow("Warped", CV_WINDOW_AUTOSIZE);
        cvShowImage("Warped", warped);

        cvNamedWindow("Xtemplate", CV_WINDOW_AUTOSIZE);
        cvShowImage("Xtemplate",xtemplate);
    #endif

    //free memory
    cvReleaseImage(&xtemplate);
    cvReleaseImage(&warped);
    free(corners);
    free(src);
    free(dst);
    cvReleaseMat(&tmatrix);
    #ifdef VISUAL_DEBUG_X
        cvReleaseImage(&compared);
    #endif

    return confidence;
}
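The confidence above is the percentage of pixels where the template and the warped image agree (both set or both clear). A hedged alternative to the manual loop binarizes both images and counts disagreements with cvXor and cvCountNonZero:

// Sketch: agreement ratio via XOR of binarized images.
IplImage* tbin = cvCreateImage(cvGetSize(xtemplate), 8, 1);
IplImage* wbin = cvCreateImage(cvGetSize(xtemplate), 8, 1);
IplImage* diff = cvCreateImage(cvGetSize(xtemplate), 8, 1);
cvThreshold(xtemplate, tbin, 0, 255, CV_THRESH_BINARY);   // nonzero -> 255
cvThreshold(warped,    wbin, 0, 255, CV_THRESH_BINARY);
cvXor(tbin, wbin, diff);                                  // 255 where they disagree
int total = xtemplate->width * xtemplate->height;
int confidence2 = (total - cvCountNonZero(diff)) * 100 / total;
// (release tbin, wbin, diff when done)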
Example #19
void ProjectionModel::calculateProjection()
{
    if(intrinsic_matrix != 0 && distortion_coeffs != 0)
    {
        int corner_count = Chessboard::board_total;

        cvCvtColor(sourceImg, gray_image, CV_RGB2GRAY);

        int found = cvFindChessboardCorners(gray_image,
                                            board_sz,
                                            corners,
                                            &corner_count,
                                            CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
        if(!found)
        {
            return;
        }

        cvFindCornerSubPix(gray_image,
                           corners,
                           corner_count,
                           cvSize(11,11),
                           cvSize(-1,-1),
                           cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1));

        objPts[0].x = 0;
        objPts[0].y = 0;
        objPts[1].x = Chessboard::board_w - 1;
        objPts[1].y = 0;
        objPts[2].x = 0;
        objPts[2].y = Chessboard::board_h - 1;
        objPts[3].x = Chessboard::board_w - 1;
        objPts[3].y = Chessboard::board_h - 1;
        imgPts[3] = corners[0];
        imgPts[2] = corners[Chessboard::board_w - 1];
        imgPts[1] = corners[(Chessboard::board_h - 1) * Chessboard::board_w];
        imgPts[0] = corners[(Chessboard::board_h - 1) * Chessboard::board_w + Chessboard::board_w - 1];

        cvGetPerspectiveTransform(objPts, imgPts, H);

        for(int i = 0; i < 4; ++i)
        {
            CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
            CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) = cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
        }

        cvFindExtrinsicCameraParams2(object_points,
                                     image_points,
                                     intrinsic_matrix,
                                     distortion_coeffs,
                                     rotation_vector,
                                     translation_vector);

        cvRodrigues2(rotation_vector, rotation_matrix);

        for(int f = 0; f < 3; f++)
        {
            for(int c = 0; c < 3; c++)
            {
                fullMatrix[c * 4 + f] = rotation_matrix->data.fl[f * 3 + c];   //transposed
            }
        }

        fullMatrix[3] = 0.0;
        fullMatrix[7] = 0.0;
        fullMatrix[11] = 0.0;
        fullMatrix[12] = translation_vector->data.fl[0];
        fullMatrix[13] = translation_vector->data.fl[1];
        fullMatrix[14] = translation_vector->data.fl[2];
        fullMatrix[15] = 1.0;
    }
}
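The final loop and assignments above pack the rotation (transposing its row-major storage into column-major) and the translation into a 4x4 modelview matrix, OpenGL-style. An illustration of the resulting layout, using the same indices as the code:

// fullMatrix represents, in column-major storage:
// | R00 R01 R02 tx |
// | R10 R11 R12 ty |   stored as { R00,R10,R20,0,  R01,R11,R21,0,
// | R20 R21 R22 tz |               R02,R12,R22,0,  tx,  ty,  tz,  1 }
// |  0   0   0   1 |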
Example #20
int main( int argc, char** argv )
{
    IplImage *current_frame=NULL;
	CvSize size;
	size.height = 300; size.width = 200;
	IplImage *corrected_frame = cvCreateImage( size, IPL_DEPTH_8U, 3 );
	IplImage *labelled_image=NULL;
	IplImage *vertical_edge_image=NULL;
    int user_clicked_key=0;
    
    // Load the video (AVI) file
    CvCapture *capture = cvCaptureFromAVI( "./Postboxes.avi" );
    // Ensure AVI opened properly
    if( !capture )
		return 1;    
    
    // Get Frames Per Second in order to playback the video at the correct speed
    int fps = ( int )cvGetCaptureProperty( capture, CV_CAP_PROP_FPS );
    if( fps <= 0 ) fps = 25; // fall back when the container reports no frame rate
    
	// Explain the User Interface
    printf( "Hot keys: \n"
		    "\tESC - quit the program\n"
            "\tSPACE - pause/resume the video\n");

	CvPoint2D32f from_points[4] = { {3, 6}, {221, 11}, {206, 368}, {18, 373} };
	CvPoint2D32f to_points[4] = { {0, 0}, {200, 0}, {200, 300}, {0, 300} };
	CvMat* warp_matrix = cvCreateMat( 3,3,CV_32FC1 );
	cvGetPerspectiveTransform( from_points, to_points, warp_matrix );

	// Create display windows for images
	cvNamedWindow( "Input video", 0 );
	cvNamedWindow( "Vertical edges", 0 );
    cvNamedWindow( "Results", 0 );

	// Setup mouse callback on the original image so that the user can see image values as they move the
	// cursor over the image.
    cvSetMouseCallback( "Input video", on_mouse_show_values, 0 );
	window_name_for_on_mouse_show_values="Input video";

    while( user_clicked_key != ESC ) {
		// Get current video frame
        current_frame = cvQueryFrame( capture );
		image_for_on_mouse_show_values=current_frame; // Assign image for mouse callback
        if( !current_frame ) // No new frame available
			break;

		cvWarpPerspective( current_frame, corrected_frame, warp_matrix );

		if (labelled_image == NULL)
		{	// The first time around the loop create the image for processing
			labelled_image = cvCloneImage( corrected_frame );
			vertical_edge_image = cvCloneImage( corrected_frame );
		}
		check_postboxes( corrected_frame, labelled_image, vertical_edge_image );

		// Display the current frame and results of processing
        cvShowImage( "Input video", current_frame );
        cvShowImage( "Vertical edges", vertical_edge_image );
        cvShowImage( "Results", labelled_image );
        
        // Wait for the delay between frames
        user_clicked_key = cvWaitKey( 1000 / fps );
		if (user_clicked_key == ' ')
		{
			user_clicked_key = cvWaitKey(0);
		}
	}
    
    /* free memory */
    cvReleaseCapture( &capture );
    cvDestroyWindow( "video" );
 
    return 0;
}
Example #21
int main(int argc, char** argv)
{
	CvPoint2D32f srcQuad[4], dstQuad[4];
	CvMat* warp_matrix = cvCreateMat(3,3,CV_32FC1);
	float Z=1;
	/*cvNamedWindow("img", CV_WINDOW_AUTOSIZE);
	cvNamedWindow("warp", CV_WINDOW_AUTOSIZE);*/

	dstQuad[0].x = 250; //dst Top left
	dstQuad[0].y = 100;
	dstQuad[1].x = 430; //dst Top right
	dstQuad[1].y = 115;
	dstQuad[2].x = 50; //dst Bottom left
	dstQuad[2].y = 170;
	dstQuad[3].x = 630; //dst Bottom right
	dstQuad[3].y = 250;

	int lOff = 50, tOff = 150;
	srcQuad[0].x = tOff; //src Top left
	srcQuad[0].y = lOff;
	srcQuad[1].x = 640-tOff; //src Top right
	srcQuad[1].y = lOff;
	srcQuad[2].x = tOff; //src Bottom left
	srcQuad[2].y = 480-lOff;
	srcQuad[3].x = 640-tOff; //src Bottom right
	srcQuad[3].y = 480-lOff;

	cvGetPerspectiveTransform(srcQuad, dstQuad,	warp_matrix);

	int ik=0, ni = 0, niX = 22-1;
	char names[22][25] = {
							"../../Data/6 Dec/009.jpg", 
							"../../Data/6 Dec/011.jpg",
							"../../Data/6 Dec/012.jpg",
							"../../Data/6 Dec/016.jpg",
							"../../Data/6 Dec/018.jpg",
							"../../Data/6 Dec/019.jpg",
							"../../Data/6 Dec/020.jpg",
							"../../Data/6 Dec/022.jpg",
							"../../Data/6 Dec/024.jpg",
							"../../Data/6 Dec/064.jpg",
							"../../Data/6 Dec/065.jpg",
							"../../Data/6 Dec/066.jpg",
							"../../Data/6 Dec/067.jpg",
							"../../Data/6 Dec/068.jpg",
							"../../Data/6 Dec/069.jpg",
							"../../Data/6 Dec/070.jpg",
							"../../Data/6 Dec/071.jpg",
							"../../Data/6 Dec/072.jpg",
							"../../Data/6 Dec/073.jpg",
							"../../Data/6 Dec/074.jpg",
							"../../Data/6 Dec/075.jpg",
							"../../Data/6 Dec/076.jpg"
						};
	int lwSum = 0, nopf = 0;
	//CvCapture *capture = cvCaptureFromCAM(0);
	/*double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
	IplImage* image = cvRetrieveFrame(capture);
	CvSize imgSize;
    imgSize.width = image->width;
    imgSize.height = image->height;
	CvVideoWriter *writer = cvCreateVideoWriter("out.avi", CV_FOURCC('M', 'J', 'P', 'G'), fps, imgSize);*/
	while(1)
	{
		//IplImage* img = cvQueryFrame(capture);
		IplImage* img = cvLoadImage( "../../Data/23 Jan/c.jpg", CV_LOAD_IMAGE_COLOR);
		//cvSaveImage(nameGen(ik++), img, 0);
		//cvShowImage("img", img);

		IplImage* warp_img = cvCloneImage(img);
		CV_MAT_ELEM(*warp_matrix, float, 2, 2) = Z;
		cvWarpPerspective(img, warp_img, warp_matrix, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
		//cvReleaseImage(&img);
		//cvWaitKey(0);

		IplImage* grayimg = cvCreateImage(cvGetSize(warp_img),IPL_DEPTH_8U,1);
		cvCvtColor( warp_img, grayimg, CV_RGB2GRAY );
		cvReleaseImage(&warp_img);
		
		cvSmooth(grayimg, grayimg, CV_GAUSSIAN, 3, 3, 0.0, 0.0);
		cvEqualizeHist(grayimg, grayimg);
		cvThreshold(grayimg, grayimg, PercentileThreshold(grayimg, 10.0), 255, CV_THRESH_BINARY);
		
		IplImage* finalimg = cvCreateImage(cvGetSize(grayimg),IPL_DEPTH_8U,3);
		CvMemStorage* line_storage=cvCreateMemStorage(0);

		CvSeq* results =  cvHoughLines2(grayimg,line_storage,CV_HOUGH_PROBABILISTIC,10,CV_PI/180*5,350,100,10);
		cvReleaseImage(&grayimg);

		double angle = 0.0, temp;
		double lengthSqd, wSum=0;
		CvPoint center = cvPoint(0, 0);
		for( int i = 0; i < results->total; i++ )
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(results,i);
			//lengthSqd = (line[0].x - line[1].x)*(line[0].x - line[1].x) + (line[0].y - line[1].y)*(line[0].y - line[1].y);
			wSum += 1;//lengthSqd;
			if(line[0].y > line[1].y)
				temp = atan((line[0].y - line[1].y + 0.0) / (line[0].x - line[1].x));
			else
				temp = atan((line[1].y - line[0].y + 0.0) / (line[1].x - line[0].x));
			if(temp < 0)
				angle += (90 + 180/3.14*temp)/* * lengthSqd*/;
			else
				angle += (180/3.14*temp - 90)/* * lengthSqd*/;
			center.x += (line[0].x + line[1].x)/2;
			center.y += (line[0].y + line[1].y)/2;
		}
		angle /= wSum;	// Angle Direction: Left == -ve and Right == +ve
						// Angle is calculated w.r.t Vertical
		//angle+=10;	// Angle Offset (Depends on camera's position)
		center.x /= results->total;
		center.y /= results->total;

		double m = (angle != 0) ? tan(CV_PI*(0.5-angle/180)) : 100000;	// 100000 represents a very large slope (near vertical)
		//m=-m;		// Slope Correction
		
		CvPoint leftCenter = cvPoint(0, 0), rightCenter = cvPoint(0, 0);
		double leftSlope = 0, rightSlope = 0, leftCount = 0, rightCount = 0;
		for( int i = 0; i < results->total; i++ )
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(results,i);
			CvPoint midPoint = cvPoint((line[0].x + line[1].x)/2, (line[0].y + line[1].y)/2);
			double L11 = (0-center.y + m*(0-center.x + 0.0))/m;
			double L22 = (midPoint.y-center.y + m*(midPoint.x-center.x + 0.0))/m;
			if(L11*L22 > 0)
			{
				leftCenter.x += midPoint.x;
				leftCenter.y += midPoint.y;
				leftSlope += -(line[1].y - line[0].y)/(line[1].x - line[0].x+0.0001);
				leftCount++;
			}
			else
			{
				rightCenter.x += midPoint.x;
				rightCenter.y += midPoint.y;
				rightSlope += -(line[1].y - line[0].y)/(line[1].x - line[0].x+0.0001);
				rightCount++;
			}
		}
		cvReleaseMemStorage(&line_storage);
		leftCenter.x /= leftCount;		leftCenter.y /= leftCount;		leftSlope /= leftCount;
		rightCenter.x /= rightCount;	rightCenter.y /= rightCount;	rightSlope /= rightCount;
		
		CvPoint botCenter = cvPoint(finalimg->width/2, finalimg->height);
		int dL = (int)(fabs(botCenter.y-leftCenter.y + m * (botCenter.x-leftCenter.x)) / sqrt(m*m + 1));
		int dR = (int)(fabs(botCenter.y-rightCenter.y + m * (botCenter.x-rightCenter.x)) / sqrt(m*m + 1));
		
		int lw = (int)(fabs((leftCenter.y - rightCenter.y) + m*(leftCenter.x - rightCenter.x)) / sqrt(m*m + 1));
		lwSum += lw;
		nopf++;
		
		if(lw <= SINGLE_LANE_WIDTH)
		{
			double L11 = (0-leftCenter.y + m*(0-leftCenter.x + 0.0))/m;
			double L22 = (botCenter.y-leftCenter.y + m*(botCenter.x-leftCenter.x + 0.0))/m;
			if(L11*L22 < 0)
				dR = lwSum/nopf - dL;	// Only Left Lane is visible
			else
				dL = lwSum/nopf - dR;	// Only Right Lane is visible
		}
		
		//cvSaveImage("test.jpg", finalimg, 0);

		printf("Bot:\t(%d, %d, %.3f)\n", dL, (finalimg->height)/10, 90.0-angle);
		printf("Target:\t(%d, %d, %.3f)\n", (dL+dR)/2, (finalimg->height)*9/10, 90.0);

		location bot, target;
		bot.x = dL;		bot.y = (finalimg->height)/10;		bot.theta = 90.0-angle;
		target.x = (dL+dR)/2;	target.y = (finalimg->height)*9/10;	target.theta = 90.0;

		cvReleaseImage(&finalimg);

		list *ol = NULL, *cl = NULL;
		elem e,vare;
		e.l = bot;	e.g = 0;	e.h = 0;	e.id = UNDEFINED;

		int n = 15;
		elem* np = loadPosData(n);
	
		while(1)
		{
			cl = append(cl, e);
			//printList(cl);
			if(isNear(e.l, target))
				break;
			ol = update(ol, e, target, np, n);
			//printList(ol);
			e = findMin(ol);
			//printf("Min: (%.3f, %.3f, %.3f, %d)\n", e.l.x, e.l.y, e.l.theta, e.id);
			ol = detach(ol, e);
			//printList(ol);
			//getchar();
		}
		free(np);

		vare = e;
		printf("(%.3f, %.3f, %.3f) : %d\n", vare.l.x, vare.l.y, vare.l.theta, vare.id);
		while(!((abs(vare.l.x-bot.x) < 1.25) && (abs(vare.l.y-bot.y) < 1.25)))
		{
			vare=search(cl,vare.parent.x,vare.parent.y);
			if(vare.id != -1)
			{
				printf("(%.3f, %.3f, %.3f) : %d\n", vare.l.x, vare.l.y, vare.l.theta, vare.id);
				e = vare;
			}
		}
		printf("\n(%.3f, %.3f, %.3f) : %d\n", e.l.x, e.l.y, e.l.theta, e.id);
		//navCommand(10-e.id, e.id);

		releaseList(ol);
		releaseList(cl);
		
		getchar();
		int c = cvWaitKey(0);
		if(c == '4')
		{
			if(ni != 0)
				ni--;
		}
		else if(c == '6')
		{
			if(ni != niX)
				ni++;
		}
	}
}
Example #22
/* Warps source into destination by a perspective transform */
static void cvWarpPerspective( CvArr* src, CvArr* dst, double quad[4][2] )
{
    CV_FUNCNAME( "cvWarpPerspective" );

    __BEGIN__;

#ifdef __IPL_H__
    IplImage src_stub, dst_stub;
    IplImage* src_img;
    IplImage* dst_img;
    CV_CALL( src_img = cvGetImage( src, &src_stub ) );
    CV_CALL( dst_img = cvGetImage( dst, &dst_stub ) );
    iplWarpPerspectiveQ( src_img, dst_img, quad, IPL_WARP_R_TO_Q,
                         IPL_INTER_CUBIC | IPL_SMOOTH_EDGE );
#else

    int fill_value = 0;

    double c[3][3]; /* transformation coefficients */
    double q[4][2]; /* rearranged quad */

    int left = 0;
    int right = 0;
    int next_right = 0;
    int next_left = 0;
    double y_min = 0;
    double y_max = 0;
    double k_left, b_left, k_right, b_right;

    uchar* src_data;
    int src_step;
    CvSize src_size;

    uchar* dst_data;
    int dst_step;
    CvSize dst_size;

    double d = 0;
    int direction = 0;
    int i;

    if( !src || (!CV_IS_IMAGE( src ) && !CV_IS_MAT( src )) ||
        cvGetElemType( src ) != CV_8UC1 ||
        cvGetDims( src ) != 2 )
    {
        CV_ERROR( CV_StsBadArg,
            "Source must be two-dimensional array of CV_8UC1 type." );
    }
    if( !dst || (!CV_IS_IMAGE( dst ) && !CV_IS_MAT( dst )) ||
        cvGetElemType( dst ) != CV_8UC1 ||
        cvGetDims( dst ) != 2 )
    {
        CV_ERROR( CV_StsBadArg,
            "Destination must be two-dimensional array of CV_8UC1 type." );
    }

    CV_CALL( cvGetRawData( src, &src_data, &src_step, &src_size ) );
    CV_CALL( cvGetRawData( dst, &dst_data, &dst_step, &dst_size ) );

    CV_CALL( cvGetPerspectiveTransform( src_size, quad, c ) );

    /* if direction > 0 then vertices in quad follow in a CW direction,
       otherwise they follow in a CCW direction */
    direction = 0;
    for( i = 0; i < 4; ++i )
    {
        int ni = i + 1; if( ni == 4 ) ni = 0;
        int pi = i - 1; if( pi == -1 ) pi = 3;

        d = (quad[i][0] - quad[pi][0])*(quad[ni][1] - quad[i][1]) -
            (quad[i][1] - quad[pi][1])*(quad[ni][0] - quad[i][0]);
        int cur_direction = CV_SIGN(d);
        if( direction == 0 )
        {
            direction = cur_direction;
        }
        else if( direction * cur_direction < 0 )
        {
            direction = 0;
            break;
        }
    }
    if( direction == 0 )
    {
        CV_ERROR( CV_StsBadArg, "Quadrangle is nonconvex or degenerated." );
    }

    /* <left> is the index of the topmost quad vertice
       if there are two such vertices <left> is the leftmost one */
    left = 0;
    for( i = 1; i < 4; ++i )
    {
        if( (quad[i][1] < quad[left][1]) ||
            ((quad[i][1] == quad[left][1]) && (quad[i][0] < quad[left][0])) )
        {
            left = i;
        }
    }
    /* rearrange <quad> vertices in such way that they follow in a CW
       direction and the first vertice is the topmost one and put them
       into <q> */
    if( direction > 0 )
    {
        for( i = left; i < 4; ++i )
        {
            q[i-left][0] = quad[i][0];
            q[i-left][1] = quad[i][1];
        }
        for( i = 0; i < left; ++i )
        {
            q[4-left+i][0] = quad[i][0];
            q[4-left+i][1] = quad[i][1];
        }
    }
    else
    {
        for( i = left; i >= 0; --i )
        {
            q[left-i][0] = quad[i][0];
            q[left-i][1] = quad[i][1];
        }
        for( i = 3; i > left; --i )
        {
            q[4+left-i][0] = quad[i][0];
            q[4+left-i][1] = quad[i][1];
        }
    }

    left = right = 0;
    /* if there are two topmost points, <right> is the index of the rightmost one,
       otherwise <right> = <left> */
    if( q[left][1] == q[left+1][1] )
    {
        right = 1;
    }

    /* <next_left> follows <left> in a CCW direction */
    next_left = 3;
    /* <next_right> follows <right> in a CW direction */
    next_right = right + 1;

    /* subtraction of 1 prevents skipping of the first row */
    y_min = q[left][1] - 1;

    /* left edge equation: x = k_left * y + b_left */
    k_left = (q[left][0] - q[next_left][0]) /
               (q[left][1] - q[next_left][1]);
    b_left = (q[left][1] * q[next_left][0] -
               q[left][0] * q[next_left][1]) /
                 (q[left][1] - q[next_left][1]);

    /* right edge equation: x = k_right * y + b_right */
    k_right = (q[right][0] - q[next_right][0]) /
               (q[right][1] - q[next_right][1]);
    b_right = (q[right][1] * q[next_right][0] -
               q[right][0] * q[next_right][1]) /
                 (q[right][1] - q[next_right][1]);
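
    /* the loop below is a classic polygon scanline fill: it walks the
       destination quadrangle row by row between the active left and right
       edges, switching to the next edge of <q> whenever the current one
       ends */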

    for(;;)
    {
        int x, y;

        y_max = MIN( q[next_left][1], q[next_right][1] );

        int iy_min = MAX( cvRound(y_min), 0 ) + 1;
        int iy_max = MIN( cvRound(y_max), dst_size.height - 1 );

        double x_min = k_left * iy_min + b_left;
        double x_max = k_right * iy_min + b_right;

        /* walk through the destination quadrangle row by row */
        for( y = iy_min; y <= iy_max; ++y )
        {
            int ix_min = MAX( cvRound( x_min ), 0 );
            int ix_max = MIN( cvRound( x_max ), dst_size.width - 1 );

            for( x = ix_min; x <= ix_max; ++x )
            {
                /* calculate coordinates of the corresponding source array point */
                double div = (c[2][0] * x + c[2][1] * y + c[2][2]);
                double src_x = (c[0][0] * x + c[0][1] * y + c[0][2]) / div;
                double src_y = (c[1][0] * x + c[1][1] * y + c[1][2]) / div;

                int isrc_x = cvFloor( src_x );
                int isrc_y = cvFloor( src_y );
                double delta_x = src_x - isrc_x;
                double delta_y = src_y - isrc_y;

                uchar* s = src_data + isrc_y * src_step + isrc_x;

                int i00, i10, i01, i11;
                i00 = i10 = i01 = i11 = (int) fill_value;

                /* linear interpolation using 2x2 neighborhood;
                   neighbors outside the source image keep fill_value
                   (bounds corrected here to avoid off-by-one reads) */
                if( isrc_x >= 0 && isrc_x < src_size.width &&
                    isrc_y >= 0 && isrc_y < src_size.height )
                {
                    i00 = s[0];
                }
                if( isrc_x >= -1 && isrc_x < src_size.width - 1 &&
                    isrc_y >= 0 && isrc_y < src_size.height )
                {
                    i10 = s[1];
                }
                if( isrc_x >= 0 && isrc_x < src_size.width &&
                    isrc_y >= -1 && isrc_y < src_size.height - 1 )
                {
                    i01 = s[src_step];
                }
                if( isrc_x >= -1 && isrc_x < src_size.width - 1 &&
                    isrc_y >= -1 && isrc_y < src_size.height - 1 )
                {
                    i11 = s[src_step+1];
                }

                double i0 = i00 + (i10 - i00)*delta_x;
                double i1 = i01 + (i11 - i01)*delta_x;

                ((uchar*)(dst_data + y * dst_step))[x] = (uchar) (i0 + (i1 - i0)*delta_y);
            }
            x_min += k_left;
            x_max += k_right;
        }

        if( (next_left == next_right) ||
            (next_left+1 == next_right && q[next_left][1] == q[next_right][1]) )
        {
            break;
        }

        if( y_max == q[next_left][1] )
        {
            left = next_left;
            next_left = left - 1;

            k_left = (q[left][0] - q[next_left][0]) /
                       (q[left][1] - q[next_left][1]);
            b_left = (q[left][1] * q[next_left][0] -
                       q[left][0] * q[next_left][1]) /
                         (q[left][1] - q[next_left][1]);
        }
        if( y_max == q[next_right][1] )
        {
            right = next_right;
            next_right = right + 1;

            k_right = (q[right][0] - q[next_right][0]) /
                       (q[right][1] - q[next_right][1]);
            b_right = (q[right][1] * q[next_right][0] -
                       q[right][0] * q[next_right][1]) /
                         (q[right][1] - q[next_right][1]);
        }
        y_min = y_max;
    }
#endif /* __IPL_H__ */

    __END__;
}
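
The convexity and orientation test at the top of this routine (the cross-product sign check) is self-contained and worth isolating. Below is a minimal standalone sketch of the same test in plain C++; quadDirection is an illustrative name, not an OpenCV function:

#include <cstdio>

// Returns +1 for a convex CW quad (in image coordinates, y down), -1 for a
// convex CCW quad, and 0 for a nonconvex or degenerate one -- the same sign
// test used by cvWarpPerspective above.
static int quadDirection( const double quad[4][2] )
{
    int direction = 0;
    for( int i = 0; i < 4; ++i )
    {
        int ni = (i + 1) % 4;   // next vertex
        int pi = (i + 3) % 4;   // previous vertex
        // z-component of the cross product of the two edges meeting at i
        double d = (quad[i][0] - quad[pi][0])*(quad[ni][1] - quad[i][1]) -
                   (quad[i][1] - quad[pi][1])*(quad[ni][0] - quad[i][0]);
        int cur = (d > 0) - (d < 0);              // CV_SIGN without OpenCV
        if( cur == 0 ) continue;                  // collinear pair casts no vote
        if( direction == 0 ) direction = cur;
        else if( direction * cur < 0 ) return 0;  // sign flip => nonconvex
    }
    return direction;
}

int main()
{
    const double cw[4][2]  = { {0,0}, {10,0}, {10,10}, {0,10} };
    const double bad[4][2] = { {0,0}, {10,10}, {10,0}, {0,10} };  // self-intersecting
    printf( "%d %d\n", quadDirection(cw), quadDirection(bad) );   // prints: 1 0
    return 0;
}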
Ejemplo n.º 23
0
int main(int argc, char** argv)
{
	cvNamedWindow("src",0 );
	cvNamedWindow("warp image",0 );
	cvNamedWindow("warp image (grey)",0 );
	cvNamedWindow("Smoothed warped gray",0 );
	cvNamedWindow("threshold image",0 );
	cvNamedWindow("canny",0 );
	cvNamedWindow("final",1 );
		
	CvPoint2D32f srcQuad[4], dstQuad[4];
	CvMat* warp_matrix = cvCreateMat(3,3,CV_32FC1);
	float Z=1;

	dstQuad[0].x = 216; //dst Top left
	dstQuad[0].y = 15;
	dstQuad[1].x = 392; //dst Top right
	dstQuad[1].y = 6;
	dstQuad[2].x = 12;  //dst Bottom left
	dstQuad[2].y = 187;
	dstQuad[3].x = 620; //dst Bottom right
	dstQuad[3].y = 159;

	srcQuad[0].x = 100; //src Top left
	srcQuad[0].y = 120;
	srcQuad[1].x = 540; //src Top right
	srcQuad[1].y = 120;
	srcQuad[2].x = 100; //src Bottom left
	srcQuad[2].y = 360;
	srcQuad[3].x = 540; //src Bottom right
	srcQuad[3].y = 360;

	// the matrix maps srcQuad to dstQuad; it is applied with
	// CV_WARP_INVERSE_MAP below, so destination pixels are looked up in src
	cvGetPerspectiveTransform(srcQuad, dstQuad, warp_matrix);
	
	//CvCapture *capture = cvCaptureFromCAM(0);
	/*double fps = cvGetCaptureProperty(capture, CV_CAP_PROP_FPS);
	IplImage* image = cvRetrieveFrame(capture);
	CvSize imgSize;
    imgSize.width = image->width;
    imgSize.height = image->height;
	CvVideoWriter *writer = cvCreateVideoWriter("out.avi", CV_FOURCC('M', 'J', 'P', 'G'), fps, imgSize);*/
	int ik=0;
	while(1)
	{
		//IplImage* img = cvQueryFrame(capture);
		IplImage* img = cvLoadImage( "../../Data/6 Dec/009.jpg", CV_LOAD_IMAGE_COLOR);
		cvShowImage( "src", img );
		//cvWriteFrame(writer, img);
		//cvSaveImage(nameGen(ik++), img, 0);
		
		IplImage* warp_img = cvCloneImage(img);
		CV_MAT_ELEM(*warp_matrix, float, 2, 2) = Z;
		cvWarpPerspective(img, warp_img, warp_matrix, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
		cvShowImage( "warp image", warp_img );

		IplImage* grayimg = cvCreateImage(cvGetSize(warp_img),IPL_DEPTH_8U,1);
		cvCvtColor( warp_img, grayimg, CV_BGR2GRAY );	// cvLoadImage returns BGR data
		cvShowImage( "warp image (grey)", grayimg );
		
		cvSmooth(grayimg, grayimg, CV_GAUSSIAN, 3, 3, 0.0, 0.0);
		cvShowImage( "Smoothed warped gray", grayimg );
		
		IplImage* thresholded_img=simplethreshold(grayimg, 220);
		cvShowImage("threshold image",thresholded_img);

		//grayimg = doCanny( thresholded_img, 50, 100, 3 );
		cvReleaseImage(&grayimg);	// release the smoothed gray before reusing the pointer
		grayimg = cvCloneImage(thresholded_img);
		cvShowImage("canny",grayimg);

		IplImage* finalimg = cvCreateImage(cvGetSize(grayimg),IPL_DEPTH_8U,3);
		CvMemStorage* line_storage=cvCreateMemStorage(0);

		CvSeq* results =  cvHoughLines2(grayimg,line_storage,CV_HOUGH_PROBABILISTIC,10,CV_PI/180*5,350,100,10);
		double angle = 0.0, temp;
		double lengthSqd, wSum=0;
		double xc = 0, yc = 0;
		for( int i = 0; i < results->total; i++ )
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(results,i);
			cvLine( finalimg, line[0], line[1], CV_RGB(0,0,255), 1, CV_AA, 0 );
			//lengthSqd = (line[0].x - line[1].x)*(line[0].x - line[1].x) + (line[0].y - line[1].y)*(line[0].y - line[1].y);
			wSum += 1;//lengthSqd;
			if(line[0].y > line[1].y)
				temp = atan((line[0].y - line[1].y + 0.0) / (line[0].x - line[1].x));
			else
				temp = atan((line[1].y - line[0].y + 0.0) / (line[1].x - line[0].x));
			if(temp < 0)
				angle += (90 + 180/CV_PI*temp)/* * lengthSqd*/;
			else
				angle += (180/CV_PI*temp - 90)/* * lengthSqd*/;
			xc += line[0].x + line[1].x;
			yc += line[0].y + line[1].y;
		}
		if(results->total > 0)	// guard against division by zero when no lines are found
		{
			angle = angle/wSum;
			xc /= 2*results->total;
			yc /= 2*results->total;
		}
		//angle+=10;
		printf("total: %d, angle: % f\n", results->total, angle);

		double m = (angle != 0) ? 1/tan(angle*CV_PI/180) : 100;	// 100 represents a very large slope (near vertical)
		m = -m;

		double x1, y1, x2, y2;	// The Center Line
		y1 = 0;
		y2 = finalimg->height;
		x1 = xc + (y1-yc)/m;
		x2 = xc + (y2-yc)/m; 
		cvLine(finalimg, cvPoint(x1, y1), cvPoint(x2, y2), CV_RGB(0,255,0), 1, CV_AA, 0);
		printf("point: %f\t%f\n", xc, yc);

		double lx=0, ly=0, lm=0, lc=0, rx=0, ry=0, rm=0, rc=0;
		for( int i = 0; i < results->total; i++ )
		{
			CvPoint* line = (CvPoint*)cvGetSeqElem(results,i);
			double xm = (line[0].x + line[1].x)/2.0, ym = (line[0].y + line[1].y)/2.0;
			if(ym - yc - m*(xm - xc) > 0)
			{
				lx += xm;
				ly += ym;
				lm += (line[1].y - line[0].y)/(line[1].x - line[0].x+0.0001);
				lc++;
			}
			else
			{
				rx += xm;
				ry += ym;
				rm += (line[1].y - line[0].y)/(line[1].x - line[0].x+0.0001);
				rc++;
			}
		}

		// Average the left and right line clusters
		lx /= lc;	ly /= lc;	lm /= lc;
		rx /= rc;	ry /= rc;	rm /= rc;
		printf("left line: %f\t%f\t%f\n", lx, ly, lm);
		printf("right line: %f\t%f\t%f\n", rx, ry, rm);

		// The Left Line
		y1 = 0;
		y2 = finalimg->height-5;
		x1 = lx + (y1-ly)/lm;
		x2 = lx + (y2-ly)/lm; 
		cvLine(finalimg, cvPoint(x1, y1), cvPoint(x2, y2), CV_RGB(255,255,0), 1, CV_AA, 0);

		// The Right Line
		y1 = 0;
		y2 = finalimg->height-5;
		x1 = rx + (y1-ry)/rm;
		x2 = rx + (y2-ry)/rm; 
		cvLine(finalimg, cvPoint(x1, y1), cvPoint(x2, y2), CV_RGB(0,255,255), 1, CV_AA, 0);

		// The Center Point
		CvPoint vpt = cvPoint(finalimg->width/2, 416);
		printf("center point: %d\t%d\n", vpt.x, vpt.y);
		
		// The Dl and Dr: horizontal gaps from the reference point to the left
		// and right lane lines along the row y = vpt.y; dl-dr is the bot's
		// lateral offset from the lane center
		int dl = vpt.x - lx + (ly-vpt.y+0.0)/lm;
		int dr = (vpt.y-ry+0.0)/rm + rx - vpt.x;
		printf("dl-dr: %d\n", dl-dr);

		cvShowImage("final",finalimg);

		if(dl-dr < SAFEZONE_LL)	// Assume that the bot lies just on the boundary of the safe zone
		{
			// the original angle test issued the same command on both branches
			navCommand(7, angle);
		}
		else if(dl-dr > SAFEZONE_RL)
		{
			navCommand(-7, angle);
		}
		else
		{
			if((angle < 10) && (angle > -10))
			{
				navCommand(angle, angle);
			}
			else
			{
				navCommand(0, angle);
			}
		}

		cvWaitKey(0);

		// release per-iteration images and storage to avoid leaking each pass
		cvReleaseImage(&img);
		cvReleaseImage(&warp_img);
		cvReleaseImage(&grayimg);
		cvReleaseImage(&thresholded_img);
		cvReleaseImage(&finalimg);
		cvReleaseMemStorage(&line_storage);
	}
}
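
The steering block at the end of this example reduces to a three-way decision on dl-dr and the lane angle. Here is a minimal sketch of that logic as a pure function, assuming the example's navCommand(left, right) convention; kSafezoneLL and kSafezoneRL are illustrative stand-ins for SAFEZONE_LL and SAFEZONE_RL, whose values are not shown in the source:

#include <cstdio>

static const int kSafezoneLL = -40;  // assumed lower bound of the safe zone
static const int kSafezoneRL =  40;  // assumed upper bound of the safe zone

struct SteerCmd { double left; double right; };  // arguments for navCommand()

// Steer right when too close to the left lane line (dl-dr below the lower
// bound), steer left when too close to the right one, otherwise just
// correct the heading -- the same decision the example makes inline.
static SteerCmd steerFromOffsets( int dl, int dr, double angle )
{
    int offset = dl - dr;
    if( offset < kSafezoneLL ) return SteerCmd{ 7, angle };
    if( offset > kSafezoneRL ) return SteerCmd{ -7, angle };
    if( angle > -10 && angle < 10 ) return SteerCmd{ angle, angle };
    return SteerCmd{ 0, angle };
}

int main()
{
    SteerCmd c = steerFromOffsets( -60, 0, 5.0 );
    printf( "%.1f %.1f\n", c.left, c.right );  // prints: 7.0 5.0
    return 0;
}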
Ejemplo n.º 24
0
int main(int argc, char *argv[])
{
	if (argc != 6) {
		printf("\nERROR: too few parameters\n");
		help();
		return -1;
	}
	help();
	//INPUT PARAMETERS:
	int board_w = atoi(argv[1]);
	int board_h = atoi(argv[2]);
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CvMat *intrinsic = (CvMat *) cvLoad(argv[3]);
	CvMat *distortion = (CvMat *) cvLoad(argv[4]);
	IplImage *image = 0, *gray_image = 0;
	if ((image = cvLoadImage(argv[5])) == 0) {
		printf("Error: Couldn't load %s\n", argv[5]);
		return -1;
	}
	gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	cvCvtColor(image, gray_image, CV_BGR2GRAY);

	//UNDISTORT OUR IMAGE
	IplImage *mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage *mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
	IplImage *t = cvCloneImage(image);
	cvRemap(t, image, mapx, mapy);

	//GET THE CHECKERBOARD ON THE PLANE
	cvNamedWindow("Checkers");
	CvPoint2D32f *corners = new CvPoint2D32f[board_n];
	int corner_count = 0;
	int found = cvFindChessboardCorners(image,
										board_sz,
										corners,
										&corner_count,
										CV_CALIB_CB_ADAPTIVE_THRESH |
										CV_CALIB_CB_FILTER_QUADS);
	if (!found) {
		printf
			("Couldn't acquire checkerboard on %s, only found %d of %d corners\n",
			 argv[5], corner_count, board_n);
		return -1;
	}
	//Get Subpixel accuracy on those corners
	cvFindCornerSubPix(gray_image, corners, corner_count,
					   cvSize(11, 11), cvSize(-1, -1),
					   cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30,
									  0.1));

	//GET THE IMAGE AND OBJECT POINTS:
	//Object points are at (r,c): (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1)
	//That means corners are at: corners[r*board_w + c]
	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;
	objPts[0].y = 0;
	objPts[1].x = board_w - 1;
	objPts[1].y = 0;
	objPts[2].x = 0;
	objPts[2].y = board_h - 1;
	objPts[3].x = board_w - 1;
	objPts[3].y = board_h - 1;
	imgPts[0] = corners[0];
	imgPts[1] = corners[board_w - 1];
	imgPts[2] = corners[(board_h - 1) * board_w];
	imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1];

	//DRAW THE POINTS in order: B,G,R,YELLOW
	cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
	cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

	//DRAW THE FOUND CHECKERBOARD
	cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
	cvShowImage("Checkers", image);

	//FIND THE HOMOGRAPHY
	CvMat *H = cvCreateMat(3, 3, CV_32F);
	CvMat *H_invt = cvCreateMat(3, 3, CV_32F);
	cvGetPerspectiveTransform(objPts, imgPts, H);

	//LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
	float Z = 25;
	int key = 0;
	IplImage *birds_image = cvCloneImage(image);
	cvNamedWindow("Birds_Eye");
	while (key != 27) {			//escape key stops
		CV_MAT_ELEM(*H, float, 2, 2) = Z;
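		// H maps objPts (ground-plane coords) to imgPts; with CV_WARP_INVERSE_MAP
		// the warp uses it to look up a source pixel for each bird's-eye pixel.
		// Scaling H(2,2) scales the perspective divide, which in effect raises
		// (larger Z) or lowers (smaller Z) the virtual camera above the plane.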
//     cvInvert(H,H_invt); //If you want to invert the homography directly
//     cvWarpPerspective(image,birds_image,H_invt,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
		//USE HOMOGRAPHY TO REMAP THE VIEW
		cvWarpPerspective(image, birds_image, H,
						  CV_INTER_LINEAR + CV_WARP_INVERSE_MAP +
						  CV_WARP_FILL_OUTLIERS);
		cvShowImage("Birds_Eye", birds_image);
		key = cvWaitKey();
		if (key == 'u')
			Z += 0.5;
		if (key == 'd')
			Z -= 0.5;
	}

	//SHOW ROTATION AND TRANSLATION VECTORS
	CvMat *image_points = cvCreateMat(4, 1, CV_32FC2);
	CvMat *object_points = cvCreateMat(4, 1, CV_32FC3);
	for (int i = 0; i < 4; ++i) {
		CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
		CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) =
			cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
	}

	CvMat *RotRodrigues = cvCreateMat(3, 1, CV_32F);
	CvMat *Rot = cvCreateMat(3, 3, CV_32F);
	CvMat *Trans = cvCreateMat(3, 1, CV_32F);
	cvFindExtrinsicCameraParams2(object_points, image_points,
								 intrinsic, distortion, RotRodrigues, Trans);
	cvRodrigues2(RotRodrigues, Rot);

	//SAVE AND EXIT
	cvSave("Rot.xml", Rot);
	cvSave("Trans.xml", Trans);
	cvSave("H.xml", H);
	cvInvert(H, H_invt);
	cvSave("H_invt.xml", H_invt);	//Bottom row of H invert is horizon line
	return 0;
}
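
Both warp examples above rest on the same point mapping: a 3x3 homography applied with a perspective divide, where the (2,2) entry plays the role of the adjustable height Z. A minimal sketch of that mapping on plain arrays follows (applyHomography is an illustrative helper, not an OpenCV call):

#include <cstdio>

// Map (x, y) through a 3x3 homography with the perspective divide:
//   w  = h20*x + h21*y + h22
//   x' = (h00*x + h01*y + h02) / w
//   y' = (h10*x + h11*y + h12) / w
// Scaling h22 (the Z of the examples above) scales w, zooming the
// bird's-eye view in or out.
static void applyHomography( const double h[3][3], double x, double y,
                             double* out_x, double* out_y )
{
    double w = h[2][0]*x + h[2][1]*y + h[2][2];
    *out_x = (h[0][0]*x + h[0][1]*y + h[0][2]) / w;
    *out_y = (h[1][0]*x + h[1][1]*y + h[1][2]) / w;
}

int main()
{
    // Identity except for h22 = Z: doubling Z halves the mapped coordinates.
    double h[3][3] = { {1,0,0}, {0,1,0}, {0,0,2} };
    double x, y;
    applyHomography( h, 100, 50, &x, &y );
    printf( "%.1f %.1f\n", x, y );  // prints: 50.0 25.0
    return 0;
}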
RTC::ReturnCode_t ImageCalibration::onExecute(RTC::UniqueId ec_id)
{	
	
	board_sz = cvSize(m_board_w, m_board_h);
	
	//Compute the calibration pattern.
	if (m_inputImageIn.isNew()) {

		m_inputImageIn.read();

		if(m_keyIn.isNew()){
			m_keyIn.read();
			key = (int)m_key.data;
		}
		
		if(g_temp_w != m_inputImage.width || g_temp_h != m_inputImage.height){
		
			inputImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			outputImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			tempImage_buff = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			undistortionImage = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			birds_image = cvCreateImage(cvSize(m_inputImage.width, m_inputImage.height), 8, 3);
			
			intrinsicMatrix = cvCreateMat(3,3,CV_64FC1);
			distortionCoefficient = cvCreateMat(4,1,CV_64FC1);
			
			captureCount = 0;
			findFlag = 0;

			mapx = cvCreateImage( cvSize(m_inputImage.width, m_inputImage.height), IPL_DEPTH_32F, 1);
			mapy = cvCreateImage( cvSize(m_inputImage.width, m_inputImage.height), IPL_DEPTH_32F, 1);

			corners = new CvPoint2D32f[m_board_w * m_board_h];
			
			g_temp_w = m_inputImage.width;
			g_temp_h = m_inputImage.height;
		
		}

		//Start capturing.
		memcpy(inputImage_buff->imageData,(void *)&(m_inputImage.pixels[0]), m_inputImage.pixels.length());

//		tempImage_buff = cvCloneImage(inputImage_buff);
		//Write the image to the OutPort.
		int len = inputImage_buff->nChannels * inputImage_buff->width * inputImage_buff->height;
		m_origImage.pixels.length(len);
		
		memcpy((void *)&(m_origImage.pixels[0]), inputImage_buff->imageData, len);
		m_origImage.width = inputImage_buff->width;
		m_origImage.height = inputImage_buff->height;

		m_origImageOut.write();
		
		//Create a window to verify the capture
		//cvShowImage("Capture", inputImage_buff);
		cvWaitKey(1);
		
		//When the space bar is pressed, take five sample frames
		if (key == ' ') {
			
			tempImage_buff = cvCloneImage(inputImage_buff);
			//Create the image
			IplImage *grayImage = cvCreateImage(cvGetSize(tempImage_buff), 8, 1);

			//Create the matrices
			CvMat *worldCoordinates = cvCreateMat((m_board_w * m_board_h) * NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); //world-coordinate matrix
			CvMat *imageCoordinates = cvCreateMat((m_board_w * m_board_h) * NUM_OF_BACKGROUND_FRAMES ,2, CV_64FC1); //image-coordinate matrix
			CvMat *pointCounts = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 1, CV_32SC1); //corner-count matrix
			CvMat *rotationVectors = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); //rotation vectors
			CvMat *translationVectors = cvCreateMat(NUM_OF_BACKGROUND_FRAMES, 3, CV_64FC1); //translation vectors

			//Set the world coordinates
			for (int i = 0; i < NUM_OF_BACKGROUND_FRAMES; i++){
				for ( int j = 0; j < (m_board_w * m_board_h); j++) {
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 0, (j % m_board_w) * UNIT);
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 1, (j / m_board_w) * UNIT);
					cvSetReal2D(worldCoordinates, i * (m_board_w * m_board_h) + j, 2, 0.0);
				}
			}

			//Set the number of corners
			for(int i = 0; i < NUM_OF_BACKGROUND_FRAMES; i++){
				cvSetReal2D(pointCounts, i, 0, (m_board_w * m_board_h));
			}
			
			//Detect the corners.
			findFlag = findCorners(tempImage_buff, grayImage, corners);

			if (findFlag != 0) {
			
				//All corners were detected: set the image coordinates.
				//Note that this loop reuses the single detection for every
				//sample frame until captureCount reaches NUM_OF_BACKGROUND_FRAMES.
				for (;;){
					for (int i = 0; i < (m_board_w * m_board_h); i++){
 						cvSetReal2D(imageCoordinates, captureCount * (m_board_w * m_board_h) + i, 0, corners[i].x);
						cvSetReal2D(imageCoordinates, captureCount * (m_board_w * m_board_h) + i, 1, corners[i].y);
					}
				
					captureCount++;    

					printf("%d枚目キャプチャしました\n", captureCount);

					if (captureCount == NUM_OF_BACKGROUND_FRAMES) {
						//The configured number of checkerboard patterns has been
						//captured: estimate the camera parameters.
						cvCalibrateCamera2(
							worldCoordinates,
							imageCoordinates,
							pointCounts,
							cvGetSize(inputImage_buff),
							intrinsicMatrix,
							distortionCoefficient,
							rotationVectors,
							translationVectors,
							CALIBRATE_CAMERA_FLAG
						);
						
						//Print the results as text
						printf("\nLens distortion coefficients\n");
						saveRenseMatrix(distortionCoefficient);
						printMatrix("%lf", distortionCoefficient);
						
						//m_renseParameter.data = renseParameters;
												
						printf("\n内部パラメータ\n");
						saveInternalParameterMatrix(intrinsicMatrix);
						printMatrix("%lf ", intrinsicMatrix);

						//m_internalParameter.data = internalParameter;
						
						captureCount = 0;
						break;
						
					}
				}
			}

			if (findFlag != 0){
				InParameter = 1;
			}else if (findFlag == 0) {
				InParameter = 0;
			}
			
			//Release memory
			cvReleaseMat(&worldCoordinates);
			cvReleaseMat(&imageCoordinates);
			cvReleaseMat(&pointCounts);
			cvReleaseMat(&rotationVectors);
			cvReleaseMat(&translationVectors);
			cvReleaseImage(&grayImage);

		}
		g_temp_w = m_inputImage.width;
		g_temp_h = m_inputImage.height;

	}
	//Acquire the extrinsic pattern
	if (key == ' ' && m_inputImageIn.isNew() && InParameter == 1) {

		//Create the matrices
		CvMat *worldCoordinates = cvCreateMat((m_board_w * m_board_h), 3, CV_64FC1); //world-coordinate matrix
		CvMat *imageCoordinates = cvCreateMat((m_board_w * m_board_h), 2, CV_64FC1); //image-coordinate matrix
		CvMat *rotationVectors = cvCreateMat(1, 3, CV_64FC1); //rotation vector
		CvMat *rotationMatrix = cvCreateMat(3, 3, CV_64FC1); //rotation matrix
		CvMat *translationVectors = cvCreateMat(1, 3, CV_64FC1); //translation vector

		//Set the world coordinates
		for (int i = 0; i < (m_board_w * m_board_h); i++){
			cvSetReal2D(worldCoordinates, i, 0, (i % m_board_w) * UNIT);
			cvSetReal2D(worldCoordinates, i, 1, (i / m_board_w) * UNIT);
			cvSetReal2D(worldCoordinates, i, 2, 0.0);
		}
	
		cvWaitKey( 1 );
	
		//	If the space key was pressed
		if ( findFlag != 0 ) {
			//	All corners were detected:
			//	set the image coordinates
			for ( int i = 0; i < (m_board_w * m_board_h); i++ ){
				cvSetReal2D( imageCoordinates, i, 0, corners[i].x);
				cvSetReal2D( imageCoordinates, i, 1, corners[i].y);
			}

			//	Estimate the extrinsic parameters
			cvFindExtrinsicCameraParams2(
				worldCoordinates,
				imageCoordinates,
				intrinsicMatrix,
				distortionCoefficient,
				rotationVectors,
				translationVectors
			);

			//	Convert the rotation vector to a rotation matrix
			cvRodrigues2( rotationVectors, rotationMatrix, NULL );

			printf( "\n外部パラメータ\n" );
			printExtrinsicMatrix( rotationMatrix, translationVectors );
			saveExternalParameterMatrix(rotationMatrix, translationVectors);

			m_externalParameter.data = CORBA::string_dup(externalParameter);
			m_renseParameter.data = CORBA::string_dup(renseParameters);
			m_internalParameter.data = CORBA::string_dup(internalParameter);
						
		}
		//Release memory
		cvReleaseMat( &worldCoordinates );
		cvReleaseMat( &imageCoordinates );
		cvReleaseMat( &rotationVectors );
		cvReleaseMat( &rotationMatrix );
		cvReleaseMat( &translationVectors );
		
		//Initialize the X and Y undistortion maps
		cvInitUndistortMap(
			intrinsicMatrix,
			distortionCoefficient,
			mapx,
			mapy
		);
		//Flag indicating the extrinsic parameters are available
		outParameter = 1;
		key = 0;
				
	 }
	
	//Once both the intrinsic and extrinsic parameters have been obtained
	if (InParameter == 1 && outParameter == 1) {

		//	Generate an image with the lens distortion corrected
		cvUndistort2(
			inputImage_buff,
			undistortionImage,
			intrinsicMatrix,
			distortionCoefficient
		);

		//cvShowImage("歪み補正", undistortionImage);

		//Write the corrected image to the OutPort.
		//int len = undistortionImage->nChannels * undistortionImage->width * undistortionImage->height;
		//m_calbImage.pixels.length(len);
		
		//Copy the undistorted image into the OutPort buffer.
		//memcpy((void *)&(m_calbImage.pixels[0]), undistortionImage->imageData, len);
		//m_calbImageOut.write();
		
		//Set the bird's-eye-view coordinates
		objPts[0].x = 0;					objPts[0].y = 0;
		objPts[1].x = m_board_w-1;			objPts[1].y = 0;
		objPts[2].x = 0;					objPts[2].y = m_board_h-1;
		objPts[3].x = m_board_w-1;			objPts[3].y = m_board_h-1;
		
		//Set the corners to use
		imgPts[0] = corners[0];
		imgPts[1] = corners[m_board_w - 1];
		imgPts[2] = corners[(m_board_h - 1) * m_board_w];
		imgPts[3] = corners[(m_board_h - 1) * m_board_w + m_board_w - 1];
		
		//Draw a circle at each selected corner
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0,0,255), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0,255,0), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255,0,0), 3);
		cvCircle(tempImage_buff, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255,255,0), 3);

		CvMat *H = cvCreateMat(3, 3, CV_32F);
		cvGetPerspectiveTransform(objPts, imgPts, H);
		
		//Set the view height.
		CV_MAT_ELEM(*H, float, 2, 2) = m_camera_Height;
		
		//Perform the warp
		cvWarpPerspective(inputImage_buff, birds_image, H, CV_INTER_LINEAR | CV_WARP_INVERSE_MAP | CV_WARP_FILL_OUTLIERS);
		
		//Write the bird's-eye view to the OutPort.
		int len = birds_image->nChannels * birds_image->width * birds_image->height;
		m_birdImage.pixels.length(len);
		memcpy((void *)&(m_birdImage.pixels[0]), birds_image->imageData, len);

		m_birdImage.width = inputImage_buff->width;
		m_birdImage.height = inputImage_buff->height;

		m_birdImageOut.write();

		cvWaitKey(10);

		//cvShowImage("Bird_Eye", birds_image);
		cvReleaseMat(&H);

		g_temp_w = m_inputImage.width;
		g_temp_h = m_inputImage.height;

		key = 0;

	}