Code example #1
void ImageOverlayer::MakeGroundPlaneZ(CvScalar color, IplImage* baseImage)
{
  
   int linePoints = 500; 
  
   vector<CvPoint3D32f> Robot_PositionsToReProject;
   Robot_PositionsToReProject.resize(linePoints);
   
   CvMat _Robot_PositionsToReProject = cvMat(1, linePoints, CV_32FC3, &Robot_PositionsToReProject[0]);    
   
    

     for(int pts = 0; pts < linePoints; pts++) // sample linePoints points along the world Z axis, from z = 0 up to z = 5
      {
	float zValue = (float)(5.0/linePoints) * (float)pts; // z in [0, 5)
	Robot_PositionsToReProject[pts] = cvPoint3D32f(0, 0, zValue);
      }
     
   
    vector<CvPoint2D32f> reprojectedPoints_Robot;
    reprojectedPoints_Robot.resize(linePoints);
    
    CvMat _imageReprojectedPoints_RobotRight = cvMat(1, linePoints, CV_32FC2, &reprojectedPoints_Robot[0]);
    
    cvProjectPoints2(&_Robot_PositionsToReProject, Rvec_right_n, Tvec_right, &_M2, &_D2, &_imageReprojectedPoints_RobotRight, NULL, NULL, NULL, NULL, NULL); 
   
    for(int pts = 0; pts < linePoints; pts++)
      {    
	CvPoint robot_PointToBeShownRight = cvPoint(reprojectedPoints_Robot[pts].x,reprojectedPoints_Robot[pts].y);
	cvCircle(baseImage, robot_PointToBeShownRight, 0, color, 2, 8, 0);       	
      }
   
}
Code example #2
File: AbstractCamera.cpp Project: M-Samoht/OpenMocap
AbstractCamera::AbstractCamera(int id, int width, int height, int frameRate, int type) :
	_id(id), _width(width), _height(height), _frameRate(frameRate), _intrinsicParams(NULL), _translation(cvPoint3D32f(0, 0, 0)),
	_rotation(cvPoint3D32f(0, 0, 0)), _distortionCoefficients(NULL),
	_distortionModelX(NULL), _distortionModelY(NULL), _disparityToDepth(NULL) {

	_undistortedFrame =  cvCreateImage(cvSize(width, height), 8, 3);
	_calibrated = false;
	_cameraType = type;
}
Code example #3
// Recursive 4-connected flood fill: collect neighboring non-zero pixels into a table,
// zeroing each pixel as it is visited. Note that there is no explicit bounds check
// (the region is assumed to be bounded by zero pixels), and deep recursion can
// overflow the call stack on large regions.
void findPointRec(vector<CvPoint3D32f> &point, CvPoint pixel, uchar* data, int step){
	if(data[pixel.y * step + pixel.x] != 0){
		CvPoint3D32f current = cvPoint3D32f(pixel.x, pixel.y, data[pixel.y * step + pixel.x]);
		point.push_back(current);
		data[pixel.y * step + pixel.x] = 0;
		
		findPointRec(point, cvPoint(pixel.x-1, pixel.y), data, step);
		findPointRec(point, cvPoint(pixel.x+1, pixel.y), data, step);
		findPointRec(point, cvPoint(pixel.x, pixel.y-1), data, step);
		findPointRec(point, cvPoint(pixel.x, pixel.y+1), data, step);
	}
}
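Because of the missing bounds check and the recursion-depth hazard noted above, an explicit-stack variant is safer in practice. Below is a minimal sketch using the same OpenCV C types as the snippet; the function name findPointIterative and the extra width/height parameters are additions for illustration, not part of the original code:

void findPointIterative(vector<CvPoint3D32f> &point, CvPoint start, uchar* data,
                        int step, int width, int height)
{
	vector<CvPoint> stack;          // explicit stack replaces recursion
	stack.push_back(start);
	while(!stack.empty()){
		CvPoint pixel = stack.back();
		stack.pop_back();
		if(pixel.x < 0 || pixel.x >= width || pixel.y < 0 || pixel.y >= height)
			continue;               // explicit bounds check
		uchar &v = data[pixel.y * step + pixel.x];
		if(v == 0)
			continue;               // background, or already visited
		point.push_back(cvPoint3D32f(pixel.x, pixel.y, v));
		v = 0;                      // mark visited
		stack.push_back(cvPoint(pixel.x-1, pixel.y));
		stack.push_back(cvPoint(pixel.x+1, pixel.y));
		stack.push_back(cvPoint(pixel.x, pixel.y-1));
		stack.push_back(cvPoint(pixel.x, pixel.y+1));
	}
}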
Code example #4
File: virtualplane.cpp Project: martincos321/kinect
void virtualplane::calcPlano(Mat& depth,Mat& MascaraZona,Mat& Mascara){

    Mat maskTot(480,640,CV_8UC1);
    Mat in(480,640,CV_32FC1);

    maskTot=MascaraZona & Mascara;
    depth.copyTo(in,maskTot);
    int NumeroPuntos=0;

    cout << "VIRTUAL PLANE : PERFORMING FIT, PLEASE WAIT..." << endl;

    //FOR RANSAC
    vector<CvPoint3D32f> datos;
    ransacfit *ranfi=new ransacfit();
    ///////////////
    for(int y=0; y<in.rows; y++)
    {
        for(int x=0; x<in.cols; x++)
        {
            if(in.at<float>(y,x)>0)
            {
                datos.push_back(cvPoint3D32f(x,y,in.at<float>(y,x)));
                NumeroPuntos++;
            }
        }
    }
    ranfi->setData(datos);
    double thrI = 0.00015, por1 = 0.01, por2 = 0.5;
    int ite = 200, maxNum = 100;
    //ranfi->doRansac(QP,thrI,por1,por2,ite,maxNum);
    bool continuar = true;

    while(continuar){
        cout << "Repeat with different parameters? (y or n)" << endl;
        char rep;
        cin >> rep;
        if(rep == 'y'){
            cout << endl << "Ideal threshold. Last : " << thrI << endl;
            cin >> thrI;
            cout << endl << "Random percent. Last : " << por1 << endl;
            cin >> por1;
            cout << endl << "Final percent. Last : " << por2 << endl;
            cin >> por2;
            cout << endl << "Max iterations. Last : " << ite << endl;
            cin >> ite;
            cout << endl << "Max Number. Last : " << maxNum << endl;
            cin >> maxNum;
            ranfi->restart();
            //ranfi->doRansac(QP,thrI,por1,por2,ite,maxNum);
        }
        else if(rep == 'n'){
Code example #5
void ImageOverlayer::OverlayEstimatedRobotPose(double x, double y, double z, double thetaRob, CvScalar color, IplImage* baseImage)
{
   
   vector<CvPoint3D32f> Robot_PositionsToReProject;
   Robot_PositionsToReProject.resize(totPntsPerPos);
   
   CvMat _Robot_PositionsToReProject = cvMat(1, totPntsPerPos, CV_32FC3, &Robot_PositionsToReProject[0]);    
  
   
  for(float j=0; j<=robHeight; j+=0.01)
   {
    
    Robot_PositionsToReProject[0] = cvPoint3D32f(x,y,j);
    for(int pts = 0; pts < circlePointsPerPosition; pts++) //circlePointsPerPosition points out of totPntsPerPos for circle
      {
	float theta = -M_PI + (float)pts*2*M_PI/(circlePointsPerPosition);  
	Robot_PositionsToReProject[1 + pts] = cvPoint3D32f( x + robRadius*cosf(theta), y + robRadius*sinf(theta), j);
      }
     
   for(int pts = 0; pts < arrowPointsPerPosition /*&& j==robHeight*/; pts++) //arrowPointsPerPosition points out of totPntsPerPos for the arrow
    {
      Robot_PositionsToReProject[1 + circlePointsPerPosition + pts] = cvPoint3D32f( x + (float)pts*(robRadius/(float)arrowPointsPerPosition)*cosf(thetaRob), y + (float)pts*(robRadius/(float)arrowPointsPerPosition)*sinf(thetaRob), robHeight);
    }
   
    vector<CvPoint2D32f> reprojectedPoints_Robot;
    reprojectedPoints_Robot.resize(totPntsPerPos);
    
    CvMat _imageReprojectedPoints_RobotRight = cvMat(1, totPntsPerPos, CV_32FC2, &reprojectedPoints_Robot[0]);
    
    cvProjectPoints2(&_Robot_PositionsToReProject, Rvec_right_n, Tvec_right, &_M2, &_D2, &_imageReprojectedPoints_RobotRight, NULL, NULL, NULL, NULL, NULL); 
   
    for(int pts = 0; pts < totPntsPerPos; pts++)
      {    
	CvPoint robot_PointToBeShownRight = cvPoint(reprojectedPoints_Robot[pts].x,reprojectedPoints_Robot[pts].y);
	cvCircle(baseImage, robot_PointToBeShownRight, 0, color, 2, 8, 0);       	
      }
   }
}
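Note on the buffer layout: for each height slice j, index 0 holds the robot's center, indices 1..circlePointsPerPosition hold the footprint circle, and the remaining arrowPointsPerPosition entries trace the heading arrow, so totPntsPerPos is presumably 1 + circlePointsPerPosition + arrowPointsPerPosition (all three are defined elsewhere in the class). Each slice is projected into the right camera with a single cvProjectPoints2 call and drawn point by point.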
Code example #6
// Convert from image to camera space by transforming point p in
// the image plane by the camera matrix.
static CvPoint3D32f ImageCStoWorldCS(const Cv3dTrackerCameraInfo &camera_info, CvPoint2D32f p)
{
    float tp[4];
    tp[0] = (float)p.x - camera_info.principal_point.x;
    tp[1] = (float)p.y - camera_info.principal_point.y;
    tp[2] = 1.f;
    tp[3] = 1.f;

    float tr[4];
    //multiply tp by mat to get tr
    MultVectorMatrix(tr, tp, camera_info.mat);

    return cvPoint3D32f(tr[0]/tr[3], tr[1]/tr[3], tr[2]/tr[3]);
}
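The 2D point is lifted to homogeneous coordinates (tp[2] = tp[3] = 1), multiplied by the camera's 4x4 matrix, and then dehomogenized by the division by tr[3], turning the projective result back into an ordinary 3D world-space point.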
Code example #7
// Find the intersection of two lines, or if they don't intersect,
// the points of closest approach.
// The lines are defined by (o1,p1) and (o2, p2).
// If they intersect, r1 and r2 will be the same.
// Returns false on error.
static bool intersection(CvPoint3D32f o1, CvPoint3D32f p1,
                         CvPoint3D32f o2, CvPoint3D32f p2,
                         CvPoint3D32f &r1, CvPoint3D32f &r2)
{
    CvPoint3D32f x = o2 - o1;
    CvPoint3D32f d1 = p1 - o1;
    CvPoint3D32f d2 = p2 - o2;

    CvPoint3D32f cross = cvPoint3D32f(d1.y*d2.z - d1.z*d2.y,
                                      d1.z*d2.x - d1.x*d2.z,
                                      d1.x*d2.y - d1.y*d2.x);
    double den = cross.x*cross.x + cross.y*cross.y + cross.z*cross.z;

    if (den < EPS)
        return false;

    double t1 = det(x, d2, cross) / den;
    double t2 = det(x, d1, cross) / den;

    r1 = o1 + d1 * t1;
    r2 = o2 + d2 * t2;

    return true;
}
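These are the standard closest-approach formulas for the lines o1 + t1*d1 and o2 + t2*d2: with n = d1 x d2 and x = o2 - o1, the parameters are t1 = det([x d2 n]) / |n|^2 and t2 = det([x d1 n]) / |n|^2. The denominator den = |n|^2 vanishes exactly when the lines are parallel, which is the degenerate case the EPS test rejects.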
Code example #8
void ImageOverlayer::initializeCamParams()
{
  int nframes=1;
  int n = 9; // number of 2D-3D point correspondences
  int N=nframes*n;
  
  vector<int> npoints;
  vector<CvPoint3D32f> objectPoints;
  vector<CvPoint2D32f> points[2];
  points[0].resize(n);
  points[1].resize(n);
  
  double R[3][3], T[3], E[3][3], F[3][3];
    

/*************************************************************/    
    
   _M1 = cvMat(3, 3, CV_64F, M1 );
   _M2 = cvMat(3, 3, CV_64F, M2 );
   _D1 = cvMat(1, 5, CV_64F, D1 );
   _D2 = cvMat(1, 5, CV_64F, D2 );
  CvMat _R = cvMat(3, 3, CV_64F, R );
  CvMat _T = cvMat(3, 1, CV_64F, T );
  CvMat _E = cvMat(3, 3, CV_64F, E );
  CvMat _F = cvMat(3, 3, CV_64F, F );
  

  
  // XY pixel coordinates, left image
  points[0][0].x=34.0;
  points[0][0].y=336.0;
  points[0][1].x=502.0;
  points[0][1].y=156.0;
  points[0][2].x=1280.0;
  points[0][2].y=279.0;
  points[0][3].x=664.0;
  points[0][3].y=174.0;
  points[0][4].x=914.0;
  points[0][4].y=209.0;
  points[0][5].x=248.0;
  points[0][5].y=300.0;
  points[0][6].x=663.0;
  points[0][6].y=482.0;
  points[0][7].x=185.0;
  points[0][7].y=364.0;
  points[0][8].x=400.0;
  points[0][8].y=507.0;  

  
  // XY pixel coordinates, right image
  points[1][0].x=866.0;
  points[1][0].y=942.0;
  points[1][1].x=98.0;
  points[1][1].y=376.0;
  points[1][2].x=856.0;
  points[1][2].y=72.0;
  points[1][3].x=445.0;
  points[1][3].y=222.0;
  points[1][4].x=690.0;
  points[1][4].y=128.0;
  points[1][5].x=779.0;
  points[1][5].y=442.0;
  points[1][6].x=1162.0;
  points[1][6].y=161.0;
  points[1][7].x=1061.0;
  points[1][7].y=413.0;
  points[1][8].x=1244.0;
  points[1][8].y=215.0;
  
  
  npoints.resize(nframes,n);
  objectPoints.resize(nframes*n);
  
  /* 3D points (x,y,z) corresponding to the 2D points from the left and right images - minimum: 8 points */
  
  objectPoints[0] = cvPoint3D32f(6.0,-4.5,0.0);
  objectPoints[1] = cvPoint3D32f(0.0,-4.5,0.0);
  objectPoints[2] = cvPoint3D32f(0.0,+4.5,0.0);
  objectPoints[3] = cvPoint3D32f(0.0,-1.5,0.0);
  objectPoints[4] = cvPoint3D32f(0.0,+1.5,0.0);
  objectPoints[5] = cvPoint3D32f(4.3,-2.7,0.0);
  objectPoints[6] = cvPoint3D32f(4.3,+2.7,0.0);
  objectPoints[7] = cvPoint3D32f(5.25,-1.75,0.0);
  objectPoints[8] = cvPoint3D32f(5.25,+1.75,0.0);  
  
  for( int i = 1; i < nframes; i++ )
      copy( objectPoints.begin(), objectPoints.begin() + n,
      objectPoints.begin() + i*n );
  
  CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0] );
  CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
  CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
  CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0] );
  
   
   /**************************************************************************/
  double R1[3][3], R2[3][3], P1[3][4], P2[3][4];

  CvMat _R1 = cvMat(3, 3, CV_64F, R1);
  CvMat _R2 = cvMat(3, 3, CV_64F, R2);
  CvMat _P1 = cvMat(3, 4, CV_64F, P1);
  CvMat _P2 = cvMat(3, 4, CV_64F, P2);

/************************************** StereoCalibration - returns R T R1 R2 T1 T2 **************************************/   
  CvSize imageSize = cvSize(1294,964);

  cvStereoCalibrate( &_objectPoints, &_imagePoints1,
      &_imagePoints2, &_npoints,
      &_M1, &_D1, &_M2, &_D2,
      imageSize, &_R, &_T, &_E, &_F,
      cvTermCriteria(CV_TERMCRIT_ITER+
      CV_TERMCRIT_EPS, 30, 1e-5),
      CV_CALIB_USE_INTRINSIC_GUESS+ CV_CALIB_FIX_ASPECT_RATIO);

     
/*************************************************************************************************************************/ 

/***************************************** Extrinsic Parameters **********************************************************/
  CvMat* Rvec_left = cvCreateMat( 3, 1, CV_64F );
  Tvec_left = cvCreateMat( 3, 1, CV_64F );
  CvMat* Rvec_right = cvCreateMat( 3, 1, CV_64F );
  Tvec_right = cvCreateMat( 3, 1, CV_64F ); 
  
  cvFindExtrinsicCameraParams2(&_objectPoints, &_imagePoints1,&_M1,&_D1,Rvec_left,Tvec_left);
  
  Rvec_left_n = cvCreateMat( 3, 3, CV_64F );
  cvRodrigues2(Rvec_left,Rvec_left_n,0);
    

  cvFindExtrinsicCameraParams2(&_objectPoints, &_imagePoints2,&_M2,&_D2,Rvec_right,Tvec_right);
  
  Rvec_right_n = cvCreateMat( 3, 3, CV_64F );
  cvRodrigues2(Rvec_right,Rvec_right_n,0);
    
/*************************************************************************************************************************/
}
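In outline: cvStereoCalibrate refines both cameras' intrinsics (M1, D1, M2, D2) from the nine hand-picked 2D-3D correspondences, and cvFindExtrinsicCameraParams2 (the C-era counterpart of solvePnP) then recovers each camera's pose independently; cvRodrigues2 converts the resulting rotation vectors into the 3x3 matrices (Rvec_left_n, Rvec_right_n) used by the reprojection calls in the other ImageOverlayer methods.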
Code example #9
int main(int argc, char *argv[])
{
	if (argc != 6) {
		printf("\nERROR: too few parameters\n");
		help();
		return -1;
	}
	help();
	//INPUT PARAMETERS:
	int board_w = atoi(argv[1]);
	int board_h = atoi(argv[2]);
	int board_n = board_w * board_h;
	CvSize board_sz = cvSize(board_w, board_h);
	CvMat *intrinsic = (CvMat *) cvLoad(argv[3]);
	CvMat *distortion = (CvMat *) cvLoad(argv[4]);
	IplImage *image = 0, *gray_image = 0;
	if ((image = cvLoadImage(argv[5])) == 0) {
		printf("Error: Couldn't load %s\n", argv[5]);
		return -1;
	}
	gray_image = cvCreateImage(cvGetSize(image), 8, 1);
	cvCvtColor(image, gray_image, CV_BGR2GRAY);

	//UNDISTORT OUR IMAGE
	IplImage *mapx = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	IplImage *mapy = cvCreateImage(cvGetSize(image), IPL_DEPTH_32F, 1);
	cvInitUndistortMap(intrinsic, distortion, mapx, mapy);
	IplImage *t = cvCloneImage(image);
	cvRemap(t, image, mapx, mapy);

	//GET THE CHECKERBOARD ON THE PLANE
	cvNamedWindow("Checkers");
	CvPoint2D32f *corners = new CvPoint2D32f[board_n];
	int corner_count = 0;
	int found = cvFindChessboardCorners(image,
										board_sz,
										corners,
										&corner_count,
										CV_CALIB_CB_ADAPTIVE_THRESH |
										CV_CALIB_CB_FILTER_QUADS);
	if (!found) {
		printf
			("Couldn't aquire checkerboard on %s, only found %d of %d corners\n",
			 argv[5], corner_count, board_n);
		return -1;
	}
	//Get Subpixel accuracy on those corners
	cvFindCornerSubPix(gray_image, corners, corner_count,
					   cvSize(11, 11), cvSize(-1, -1),
					   cvTermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30,
									  0.1));

	//GET THE IMAGE AND OBJECT POINTS:
	//Object points are at (r,c): (0,0), (board_w-1,0), (0,board_h-1), (board_w-1,board_h-1)
	//That means corners are at: corners[r*board_w + c]
	CvPoint2D32f objPts[4], imgPts[4];
	objPts[0].x = 0;
	objPts[0].y = 0;
	objPts[1].x = board_w - 1;
	objPts[1].y = 0;
	objPts[2].x = 0;
	objPts[2].y = board_h - 1;
	objPts[3].x = board_w - 1;
	objPts[3].y = board_h - 1;
	imgPts[0] = corners[0];
	imgPts[1] = corners[board_w - 1];
	imgPts[2] = corners[(board_h - 1) * board_w];
	imgPts[3] = corners[(board_h - 1) * board_w + board_w - 1];

	//DRAW THE POINTS in order: B,G,R,YELLOW
	cvCircle(image, cvPointFrom32f(imgPts[0]), 9, CV_RGB(0, 0, 255), 3);
	cvCircle(image, cvPointFrom32f(imgPts[1]), 9, CV_RGB(0, 255, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[2]), 9, CV_RGB(255, 0, 0), 3);
	cvCircle(image, cvPointFrom32f(imgPts[3]), 9, CV_RGB(255, 255, 0), 3);

	//DRAW THE FOUND CHECKERBOARD
	cvDrawChessboardCorners(image, board_sz, corners, corner_count, found);
	cvShowImage("Checkers", image);

	//FIND THE HOMOGRAPHY
	CvMat *H = cvCreateMat(3, 3, CV_32F);
	CvMat *H_invt = cvCreateMat(3, 3, CV_32F);
	cvGetPerspectiveTransform(objPts, imgPts, H);

	//LET THE USER ADJUST THE Z HEIGHT OF THE VIEW
	float Z = 25;
	int key = 0;
	IplImage *birds_image = cvCloneImage(image);
	cvNamedWindow("Birds_Eye");
	while (key != 27) {			//escape key stops
		CV_MAT_ELEM(*H, float, 2, 2) = Z;
//     cvInvert(H,H_invt); //If you want to invert the homography directly
//     cvWarpPerspective(image,birds_image,H_invt,CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS );
		//USE HOMOGRAPHY TO REMAP THE VIEW
		cvWarpPerspective(image, birds_image, H,
						  CV_INTER_LINEAR + CV_WARP_INVERSE_MAP +
						  CV_WARP_FILL_OUTLIERS);
		cvShowImage("Birds_Eye", birds_image);
		key = cvWaitKey();
		if (key == 'u')
			Z += 0.5;
		if (key == 'd')
			Z -= 0.5;
	}

	//SHOW ROTATION AND TRANSLATION VECTORS
	CvMat *image_points = cvCreateMat(4, 1, CV_32FC2);
	CvMat *object_points = cvCreateMat(4, 1, CV_32FC3);
	for (int i = 0; i < 4; ++i) {
		CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
		CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) =
			cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
	}

	CvMat *RotRodrigues = cvCreateMat(3, 1, CV_32F);
	CvMat *Rot = cvCreateMat(3, 3, CV_32F);
	CvMat *Trans = cvCreateMat(3, 1, CV_32F);
	cvFindExtrinsicCameraParams2(object_points, image_points,
								 intrinsic, distortion, RotRodrigues, Trans);
	cvRodrigues2(RotRodrigues, Rot);

	//SAVE AND EXIT
	cvSave("Rot.xml", Rot);
	cvSave("Trans.xml", Trans);
	cvSave("H.xml", H);
	cvInvert(H, H_invt);
	cvSave("H_invt.xml", H_invt);	//Bottom row of H invert is horizon line
	return 0;
}
Code example #10
int CybCamCalibration::calibration(CvSeq* image_points_seq, CvSize img_size, CvSize board_size,
                                   float square_size, float aspect_ratio, int flags, CvMat* camera_matrix,
                                   CvMat* dist_coeffs, CvMat** extr_params, CvMat** reproj_errs,
                                   double* avg_reproj_err) {
    int code;
    int image_count = image_points_seq->total;
    int point_count = board_size.width*board_size.height;
    CvMat* image_points = cvCreateMat( 1, image_count*point_count, CV_32FC2);
    CvMat* object_points = cvCreateMat( 1, image_count*point_count, CV_32FC3);
    CvMat* point_counts = cvCreateMat( 1, image_count, CV_32SC1);
    CvMat rot_vects, trans_vects;
    int i, j, k;
    CvSeqReader reader;
    cvStartReadSeq(image_points_seq, &reader);

    // initialize arrays of points
    for (i = 0; i < image_count; i++) {
        CvPoint2D32f* src_img_pt = (CvPoint2D32f*)reader.ptr;
        CvPoint2D32f* dst_img_pt = ((CvPoint2D32f*)image_points->data.fl) + i
                                   *point_count;
        CvPoint3D32f* obj_pt = ((CvPoint3D32f*)object_points->data.fl) + i
                               *point_count;

        for (j = 0; j < board_size.height; j++)
            for (k = 0; k < board_size.width; k++) {
                *obj_pt++ = cvPoint3D32f(j*square_size, k*square_size, 0);
                *dst_img_pt++ = *src_img_pt++;
            }
        CV_NEXT_SEQ_ELEM( image_points_seq->elem_size, reader );
    }

    cvSet(point_counts, cvScalar(point_count) );

    *extr_params = cvCreateMat(image_count, 6, CV_32FC1);
    cvGetCols( *extr_params, &rot_vects, 0, 3);
    cvGetCols( *extr_params, &trans_vects, 3, 6);

    cvZero( camera_matrix );
    cvZero( dist_coeffs );

    if (flags & CV_CALIB_FIX_ASPECT_RATIO) {
        camera_matrix->data.db[0] = aspect_ratio;
        camera_matrix->data.db[4] = 1.;
    }

    cvCalibrateCamera2(object_points, image_points, point_counts, img_size,
                       camera_matrix, dist_coeffs, &rot_vects, &trans_vects, flags);

    code = cvCheckArr(camera_matrix, CV_CHECK_QUIET) && cvCheckArr(dist_coeffs,
            CV_CHECK_QUIET) && cvCheckArr( *extr_params, CV_CHECK_QUIET);

    *reproj_errs = cvCreateMat( 1, image_count, CV_64FC1);
    *avg_reproj_err = compute_error(object_points, &rot_vects,
                                    &trans_vects, camera_matrix, dist_coeffs, image_points,
                                    point_counts, *reproj_errs);

    cvReleaseMat( &object_points);
    cvReleaseMat( &image_points);
    cvReleaseMat( &point_counts);

    return code;
}
Code example #11
//////////////////////////////
// cv3dTrackerLocateObjects //
//////////////////////////////
CV_IMPL int  cv3dTrackerLocateObjects(int num_cameras, int num_objects,
                 const Cv3dTrackerCameraInfo camera_info[],      // size is num_cameras
                 const Cv3dTracker2dTrackedObject tracking_info[], // size is num_objects*num_cameras
                 Cv3dTrackerTrackedObject tracked_objects[])     // size is num_objects
{
    /*CV_FUNCNAME("cv3dTrackerLocateObjects");*/
    int found_objects = 0;

    // count how many cameras could see each object
    std::map<int, int> count;
    for (int c = 0; c < num_cameras; c++)
    {
        if (!camera_info[c].valid)
            continue;

        for (int i = 0; i < num_objects; i++)
        {
            const Cv3dTracker2dTrackedObject *o = &tracking_info[c*num_objects+i];
            if (o->id != -1)
                count[o->id]++;
        }
    }

    // process each object that was seen by at least two cameras
    for (std::map<int, int>::iterator i = count.begin(); i != count.end(); i++)
    {
        if (i->second < 2)
            continue; // ignore object seen by only one camera
        int id = i->first;

        // find an approximation of the object's location for each pair of cameras that
        // could see this object, and average them
        CvPoint3D32f total = cvPoint3D32f(0, 0, 0);
        int weight = 0;

        for (int c1 = 0; c1 < num_cameras-1; c1++)
        {
            if (!camera_info[c1].valid)
                continue;

            const Cv3dTracker2dTrackedObject *o1 = find(&tracking_info[c1*num_objects],
                                                        num_objects, id);
            if (o1 == NULL)
                continue; // this camera didn't see this object

            CvPoint3D32f p1a = CAMERA_POS(camera_info[c1]);
            CvPoint3D32f p1b = ImageCStoWorldCS(camera_info[c1], o1->p);

            for (int c2 = c1 + 1; c2 < num_cameras; c2++)
            {
                if (!camera_info[c2].valid)
                    continue;

                const Cv3dTracker2dTrackedObject *o2 = find(&tracking_info[c2*num_objects],
                                                            num_objects, id);
                if (o2 == NULL)
                    continue; // this camera didn't see this object

                CvPoint3D32f p2a = CAMERA_POS(camera_info[c2]);
                CvPoint3D32f p2b = ImageCStoWorldCS(camera_info[c2], o2->p);

                // these variables are initialized simply to avoid erroneous error messages
                // from the compiler
                CvPoint3D32f r1 = cvPoint3D32f(0, 0, 0);
                CvPoint3D32f r2 = cvPoint3D32f(0, 0, 0);

                // find the intersection of the two lines (or the points of closest
                // approach, if they don't intersect)
                if (!intersection(p1a, p1b, p2a, p2b, r1, r2))
                    continue;

                total += midpoint(r1, r2);
                weight++;
            }
        }

        if (weight == 0)
            continue; // all candidate camera pairs were rejected as parallel

        CvPoint3D32f center = total/weight;
        tracked_objects[found_objects++] = cv3dTrackerTrackedObject(id, center);
    }

    return found_objects;
}
Code example #12
static CvPoint3D32f operator /(const CvPoint3D32f &p, int d)
{
    return cvPoint3D32f(p.x/d, p.y/d, p.z/d);
}
Code example #13
// Find the midpoint of the line segment between two points.
static CvPoint3D32f midpoint(const CvPoint3D32f &p1, const CvPoint3D32f &p2)
{
    return cvPoint3D32f((p1.x+p2.x)/2, (p1.y+p2.y)/2, (p1.z+p2.z)/2);
}
Code example #14
File: mesh.cpp Project: minghuig/wing_project
/* Function: TPS
 * 
 * Description: Calculate Thin Plate Spline (TPS) weights from control points 
 *     and build a new height grid by interpolating with them. Code taken and
 *     modified from http://elonen.iki.fi/code/tpsdemo/index.html
 * 
 * Parameters:
 *     features3DPrime: feature points to serve as control for TPS calculation
 *     numPoints: number of feature points
 *     grid: 2D double array to store output of the TPS calculations. The rows 
 *         represent x coordinate values, columns represent y coordinate values, 
 *         and values of the array represent the z coordinate values
 *     gridHeight: height of the grid
 *     gridWidth: width of the grid
 *     gridHeightStartIndex: The grid row and column indices start at 0 because
 *         it is an array. However, the actual x and y start values do not
 *         necessarily start at 0, so gridHeightStartIndex represents the value
 *         at which the y coordinates start in the grid.
 *     gridWidthStartIndex: the value at which the x coordinates start in the
 *         grid
 *     regularization: smoothing parameter that is the ratio of the error variance
 *         to the scale parameter of the covariance function
 * 
 * Returns: 0 on success, error code on error.
 */
int TPS(
    _In_ CvPoint3D32f *features3DPrime,
    _In_ int numPoints,
    _Out_ double **grid,
    _In_ int gridHeight,
    _In_ int gridWidth,
    _In_ int gridHeightStartIndex,
    _In_ int gridWidthStartIndex,
    _In_ double regularization)
{    
    // We need at least 3 points to define a plane
    if (numPoints < 3)
    {
        return NOT_ENOUGH_POINTS_ERROR;
    }
    
    // Allocate the matrix and vector
    gsl_matrix *L = gsl_matrix_alloc(numPoints+3, numPoints+3);
    gsl_vector *V = gsl_vector_alloc(numPoints+3);
    
    if (L == NULL || V == NULL)
    {
        if (L) gsl_matrix_free(L);
        if (V) gsl_vector_free(V);
        return OUT_OF_MEMORY_ERROR;
    }
    
    // Fill K (p x p, upper left of L) and calculate mean edge length from control 
    // points. K is symmetrical so we really have to calculate only about half 
    // of the coefficients.
    double a = 0.0;
    
    for (int i = 0; i < numPoints; i++)
    {
        CvPoint3D32f iPoint = features3DPrime[i];
        
        for (int j = i+1; j < numPoints; j++)
        {
            CvPoint3D32f jPoint = features3DPrime[j];
            
            iPoint.z = 0.0;
            jPoint.z = 0.0;
                                    
            double edgeLength = sqrt(pow(iPoint.x-jPoint.x, 2) + pow(iPoint.y-jPoint.y, 2));
            
            gsl_matrix_set(L, i, j, TPSBase(edgeLength));
            gsl_matrix_set(L, j, i, TPSBase(edgeLength));
            
            a += edgeLength * 2;
        }
    }
    
    a /= (double)(numPoints * numPoints);
    
    // Fill the rest of L
    for (int i = 0; i < numPoints; i++)
    {
        // diagonal: regularization parameters (lambda * a^2)
        gsl_matrix_set(L, i, i, regularization * a * a);
        
        // P (p x 3, upper right)
        gsl_matrix_set(L, i, numPoints, 1.0);
        gsl_matrix_set(L, i, numPoints+1, features3DPrime[i].x);
        gsl_matrix_set(L, i, numPoints+2, features3DPrime[i].y);
        
        // P transposed (3 x p, bottom left)
        gsl_matrix_set(L, numPoints, i, 1.0);
        gsl_matrix_set(L, numPoints+1, i, features3DPrime[i].x);
        gsl_matrix_set(L, numPoints+2, i, features3DPrime[i].y);
    }
    
    // O (3 x 3, lower right)
    for (int i = numPoints; i < numPoints+3; i++)
    {
        for (int j = numPoints; j < numPoints+3; j++)
        {
            gsl_matrix_set(L, i, j, 0.0);
        }
    }
        
    // Fill the right hand vector V
    for (int i = 0; i < numPoints; i++)
    {
        gsl_vector_set(V, i, features3DPrime[i].z);
    }
    
    gsl_vector_set(V, numPoints, 0.0);
    gsl_vector_set(V, numPoints+1, 0.0);
    gsl_vector_set(V, numPoints+2, 0.0);
    
    int signum;
    gsl_permutation *perm = gsl_permutation_alloc(numPoints+3);
    
    if (perm == NULL)
    {
        gsl_matrix_free(L);
        gsl_vector_free(V);
        return OUT_OF_MEMORY_ERROR;
    }
    
    // Solve the linear system in place
    gsl_linalg_LU_decomp(L, perm, &signum);
    gsl_linalg_LU_svx(L, perm, V);    
    
    gsl_permutation_free(perm);
    
    // Interpolate grid heights
    for (int x = gridWidthStartIndex; x < gridWidthStartIndex + gridWidth; x++)
    {
        for (int y = gridHeightStartIndex; y < gridHeightStartIndex + gridHeight; y++ )
        {
            double h = gsl_vector_get(V, numPoints) + gsl_vector_get(V, numPoints+1)*x + gsl_vector_get(V, numPoints+2)*y;

            for (int i = 0; i < numPoints; i++)
            {
                CvPoint3D32f iPoint = features3DPrime[i];
                CvPoint3D32f curPoint = cvPoint3D32f((double)x, (double)y, 0.0);

                iPoint.z = 0;
                h += gsl_vector_get(V, i) * TPSBase(sqrt(pow(iPoint.x-curPoint.x, 2) + pow(iPoint.y-curPoint.y, 2) + pow(iPoint.z-curPoint.z, 2)));
            }
            
            grid[x - gridWidthStartIndex][y - gridHeightStartIndex] = h;
        }
    }
    
    gsl_matrix_free(L);
    gsl_vector_free(V);
    
    return 0;
}
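For orientation: this solves the classic TPS linear system L*w = V, whose upper-left block K holds the kernel values for the pairwise control-point distances and whose border blocks hold the affine part [1 x y]. Assuming TPSBase implements the usual kernel U(r) = r^2 log(r), as in the tpsdemo code cited in the header, the solved vector V ends up containing the numPoints radial weights followed by the three affine coefficients, which is why the interpolation loop seeds each grid cell with the affine term gsl_vector_get(V, numPoints) + a_x*x + a_y*y before accumulating the kernel contributions.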
Code example #15
int StereoVision::calibrationEnd() {
    calibrationStarted = false;

    // ARRAY AND VECTOR STORAGE:
    double M1[3][3], M2[3][3], D1[5], D2[5];
    double R[3][3], T[3], E[3][3], F[3][3];
    CvMat _M1,_M2,_D1,_D2,_R,_T,_E,_F;

    _M1 = cvMat(3, 3, CV_64F, M1 );
    _M2 = cvMat(3, 3, CV_64F, M2 );
    _D1 = cvMat(1, 5, CV_64F, D1 );
    _D2 = cvMat(1, 5, CV_64F, D2 );
    _R = cvMat(3, 3, CV_64F, R );
    _T = cvMat(3, 1, CV_64F, T );
    _E = cvMat(3, 3, CV_64F, E );
    _F = cvMat(3, 3, CV_64F, F );

    // HARVEST CHESSBOARD 3D OBJECT POINT LIST:
    objectPoints.resize(sampleCount*cornersN);

    for(int k=0; k<sampleCount; k++)
        for(int i = 0; i < cornersY; i++ )
            for(int j = 0; j < cornersX; j++ )
                objectPoints[k*cornersY*cornersX + i*cornersX + j] = cvPoint3D32f(i, j, 0);


    npoints.resize(sampleCount,cornersN);

    int N = sampleCount * cornersN;


    CvMat _objectPoints = cvMat(1, N, CV_32FC3, &objectPoints[0] );
    CvMat _imagePoints1 = cvMat(1, N, CV_32FC2, &points[0][0] );
    CvMat _imagePoints2 = cvMat(1, N, CV_32FC2, &points[1][0] );
    CvMat _npoints = cvMat(1, npoints.size(), CV_32S, &npoints[0] );
    cvSetIdentity(&_M1);
    cvSetIdentity(&_M2);
    cvZero(&_D1);
    cvZero(&_D2);

    //CALIBRATE THE STEREO CAMERAS
    cvStereoCalibrate( &_objectPoints, &_imagePoints1,
                       &_imagePoints2, &_npoints,
                       &_M1, &_D1, &_M2, &_D2,
                       imageSize, &_R, &_T, &_E, &_F,
                       cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS, 100, 1e-5),
                       CV_CALIB_FIX_ASPECT_RATIO + CV_CALIB_ZERO_TANGENT_DIST + CV_CALIB_SAME_FOCAL_LENGTH
                     );

    //Always work in undistorted space
    cvUndistortPoints( &_imagePoints1, &_imagePoints1,&_M1, &_D1, 0, &_M1 );
    cvUndistortPoints( &_imagePoints2, &_imagePoints2,&_M2, &_D2, 0, &_M2 );

    //COMPUTE AND DISPLAY RECTIFICATION


    double R1[3][3], R2[3][3];
    CvMat _R1 = cvMat(3, 3, CV_64F, R1);
    CvMat _R2 = cvMat(3, 3, CV_64F, R2);

    //HARTLEY'S RECTIFICATION METHOD
    double H1[3][3], H2[3][3], iM[3][3];
    CvMat _H1 = cvMat(3, 3, CV_64F, H1);
    CvMat _H2 = cvMat(3, 3, CV_64F, H2);
    CvMat _iM = cvMat(3, 3, CV_64F, iM);

    cvStereoRectifyUncalibrated(
        &_imagePoints1,&_imagePoints2, &_F,
        imageSize,
        &_H1, &_H2, 3
    );
    cvInvert(&_M1, &_iM);
    cvMatMul(&_H1, &_M1, &_R1);
    cvMatMul(&_iM, &_R1, &_R1);
    cvInvert(&_M2, &_iM);
    cvMatMul(&_H2, &_M2, &_R2);
    cvMatMul(&_iM, &_R2, &_R2);


    //Precompute map for cvRemap()
    cvReleaseMat(&mx1);
    cvReleaseMat(&my1);
    cvReleaseMat(&mx2);
    cvReleaseMat(&my2);
    mx1 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );
    my1 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );
    mx2 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );
    my2 = cvCreateMat( imageSize.height,imageSize.width, CV_32F );

    cvInitUndistortRectifyMap(&_M1,&_D1,&_R1,&_M1,mx1,my1);
    cvInitUndistortRectifyMap(&_M2,&_D2,&_R2,&_M2,mx2,my2);

    calibrationDone = true;

    return RESULT_OK;
}
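The pair of cvMatMul calls after cvStereoRectifyUncalibrated implements the standard conversion of Hartley's pixel-space rectification homographies into the rotation-like transforms expected by cvInitUndistortRectifyMap: R1 = M1^-1 * H1 * M1 (and likewise for R2), i.e. each homography is conjugated by its camera matrix so that it can be applied in normalized camera coordinates.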
Code example #16
// Estimate face absolute orientations
vector<float> CRecognitionAlgs::CalcAbsoluteOrientations(
    const VO_Shape& iShape2D,
    const VO_Shape& iShape3D,
    VO_Shape& oShape2D)
{
    assert (iShape2D.GetNbOfPoints() == iShape3D.GetNbOfPoints() );
    unsigned int NbOfPoints = iShape3D.GetNbOfPoints();
    Point3f pt3d;
    Point2f pt2d;
    float height1 = iShape2D.GetHeight();
    float height2 = iShape3D.GetHeight();
    VO_Shape tempShape2D = iShape2D;
    tempShape2D.Scale(height2/height1);

    //Create the model points
    std::vector<CvPoint3D32f> modelPoints;
    for(unsigned int i = 0; i < NbOfPoints; ++i)
    {
        pt3d = iShape3D.GetA3DPoint(i);
        modelPoints.push_back(cvPoint3D32f(pt3d.x, pt3d.y, pt3d.z));
    }

    //Create the image points
    std::vector<CvPoint2D32f> srcImagePoints;
    for(unsigned int i = 0; i < NbOfPoints; ++i)
    {
        pt2d = tempShape2D.GetA2DPoint(i);
        srcImagePoints.push_back(cvPoint2D32f(pt2d.x, pt2d.y));
    }

    //Create the POSIT object with the model points
    CvPOSITObject *positObject = cvCreatePOSITObject( &modelPoints[0], NbOfPoints );

    //Estimate the pose
    CvMatr32f rotation_matrix = new float[9];
    CvVect32f translation_vector = new float[3];
    CvTermCriteria criteria = cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 100, 1.0e-4f);
    cvPOSIT( positObject, &srcImagePoints[0], FOCAL_LENGTH, criteria, rotation_matrix, translation_vector );

    //rotation_matrix to Euler angles, refer to VO_Shape::GetRotation
    float sin_beta  = -rotation_matrix[0 * 3 + 2];
    float tan_alpha = rotation_matrix[1 * 3 + 2] / rotation_matrix[2 * 3 + 2];
    float tan_gamma = rotation_matrix[0 * 3 + 1] / rotation_matrix[0 * 3 + 0];

    //Project the model points with the estimated pose
    oShape2D = tempShape2D;
    for ( unsigned int i=0; i < NbOfPoints; ++i )
    {
        pt3d.x = rotation_matrix[0] * modelPoints[i].x +
            rotation_matrix[1] * modelPoints[i].y +
            rotation_matrix[2] * modelPoints[i].z +
            translation_vector[0];
        pt3d.y = rotation_matrix[3] * modelPoints[i].x +
            rotation_matrix[4] * modelPoints[i].y +
            rotation_matrix[5] * modelPoints[i].z +
            translation_vector[1];
        pt3d.z = rotation_matrix[6] * modelPoints[i].x +
            rotation_matrix[7] * modelPoints[i].y +
            rotation_matrix[8] * modelPoints[i].z +
            translation_vector[2];
        if ( pt3d.z != 0 )
        {
            pt2d.x = FOCAL_LENGTH * pt3d.x / pt3d.z;
            pt2d.y = FOCAL_LENGTH * pt3d.y / pt3d.z;
        }
        oShape2D.SetA2DPoint(pt2d, i);
    }

    //free POSIT resources (the original leaked these)
    cvReleasePOSITObject(&positObject);
    delete [] rotation_matrix;
    delete [] translation_vector;

    //return Euler angles
    vector<float> pos(3);
    pos[0] = atan(tan_alpha);    // yaw
    pos[1] = asin(sin_beta);     // pitch
    pos[2] = atan(tan_gamma);    // roll
    return pos;
}
Code example #17
File: ProjectionModel.cpp Project: mp3guy/PAC7302
void ProjectionModel::calculateProjection()
{
    if(intrinsic_matrix != 0 && distortion_coeffs != 0)
    {
        int corner_count = Chessboard::board_total;

        cvCvtColor(sourceImg, gray_image, CV_RGB2GRAY);

        int found = cvFindChessboardCorners(gray_image,
                                            board_sz,
                                            corners,
                                            &corner_count,
                                            CV_CALIB_CB_ADAPTIVE_THRESH | CV_CALIB_CB_FILTER_QUADS);
        if(!found)
        {
            return;
        }

        cvFindCornerSubPix(gray_image,
                           corners,
                           corner_count,
                           cvSize(11,11),
                           cvSize(-1,-1),
                           cvTermCriteria( CV_TERMCRIT_EPS+CV_TERMCRIT_ITER, 30, 0.1));

        objPts[0].x = 0;
        objPts[0].y = 0;
        objPts[1].x = Chessboard::board_w - 1;
        objPts[1].y = 0;
        objPts[2].x = 0;
        objPts[2].y = Chessboard::board_h - 1;
        objPts[3].x = Chessboard::board_w - 1;
        objPts[3].y = Chessboard::board_h - 1;
        imgPts[3] = corners[0];
        imgPts[2] = corners[Chessboard::board_w - 1];
        imgPts[1] = corners[(Chessboard::board_h - 1) * Chessboard::board_w];
        imgPts[0] = corners[(Chessboard::board_h - 1) * Chessboard::board_w + Chessboard::board_w - 1];

        cvGetPerspectiveTransform(objPts, imgPts, H);

        for(int i = 0; i < 4; ++i)
        {
            CV_MAT_ELEM(*image_points, CvPoint2D32f, i, 0) = imgPts[i];
            CV_MAT_ELEM(*object_points, CvPoint3D32f, i, 0) = cvPoint3D32f(objPts[i].x, objPts[i].y, 0);
        }

        cvFindExtrinsicCameraParams2(object_points,
                                     image_points,
                                     intrinsic_matrix,
                                     distortion_coeffs,
                                     rotation_vector,
                                     translation_vector);

        cvRodrigues2(rotation_vector, rotation_matrix);

        for(int f = 0; f < 3; f++)
        {
            for(int c = 0; c < 3; c++)
            {
                fullMatrix[c * 4 + f] = rotation_matrix->data.fl[f * 3 + c];   //transposed
            }
        }

        fullMatrix[3] = 0.0;
        fullMatrix[7] = 0.0;
        fullMatrix[11] = 0.0;
        fullMatrix[12] = translation_vector->data.fl[0];
        fullMatrix[13] = translation_vector->data.fl[1];
        fullMatrix[14] = translation_vector->data.fl[2];
        fullMatrix[15] = 1.0;
    }
}
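fullMatrix is filled in column-major (OpenGL-style) order: the rotation is written transposed into the upper-left 3x3 block, the translation occupies elements 12-14, and the fixed 0 and 1 entries complete a 4x4 model-view matrix, presumably for handing the estimated chessboard pose to an OpenGL renderer.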
Code example #18
static CvPoint3D32f operator +(CvPoint3D32f a, CvPoint3D32f b)
{
    return cvPoint3D32f(a.x + b.x, a.y + b.y, a.z + b.z);
}
Code example #19
static CvPoint3D32f operator -(CvPoint3D32f a, CvPoint3D32f b)
{
    return cvPoint3D32f(a.x - b.x, a.y - b.y, a.z - b.z);
}
Code example #20
static CvPoint3D32f operator *(CvPoint3D32f v, double f)
{
    return cvPoint3D32f(f*v.x, f*v.y, f*v.z);
}
Code example #21
File: calibfilter.cpp Project: 406089450/opencv
void CvCalibFilter::Stop( bool calibrate )
{
    int i, j;
    isCalibrated = false;

    // deallocate undistortion maps
    for( i = 0; i < cameraCount; i++ )
    {
        cvReleaseMat( &undistMap[i][0] );
        cvReleaseMat( &undistMap[i][1] );
        cvReleaseMat( &rectMap[i][0] );
        cvReleaseMat( &rectMap[i][1] );
    }

    if( calibrate && framesAccepted > 0 )
    {
        int n = framesAccepted;
        CvPoint3D32f* buffer =
            (CvPoint3D32f*)cvAlloc( n * etalonPointCount * sizeof(buffer[0]));
        CvMat mat;
        float* rotMatr = (float*)cvAlloc( n * 9 * sizeof(rotMatr[0]));
        float* transVect = (float*)cvAlloc( n * 3 * sizeof(transVect[0]));
        int* counts = (int*)cvAlloc( n * sizeof(counts[0]));

        cvInitMatHeader( &mat, 1, sizeof(CvCamera)/sizeof(float), CV_32FC1, 0 );
        memset( cameraParams, 0, cameraCount * sizeof(cameraParams[0]));

        for( i = 0; i < framesAccepted; i++ )
        {
            counts[i] = etalonPointCount;
            for( j = 0; j < etalonPointCount; j++ )
                buffer[i * etalonPointCount + j] = cvPoint3D32f( etalonPoints[j].x,
                                                                 etalonPoints[j].y, 0 );
        }

        for( i = 0; i < cameraCount; i++ )
        {
            cvCalibrateCamera( framesAccepted, counts,
                               imgSize, points[i], buffer,
                               cameraParams[i].distortion,
                               cameraParams[i].matrix,
                               transVect, rotMatr, 0 );

            cameraParams[i].imgSize[0] = (float)imgSize.width;
            cameraParams[i].imgSize[1] = (float)imgSize.height;

//            cameraParams[i].focalLength[0] = cameraParams[i].matrix[0];
//            cameraParams[i].focalLength[1] = cameraParams[i].matrix[4];

//            cameraParams[i].principalPoint[0] = cameraParams[i].matrix[2];
//            cameraParams[i].principalPoint[1] = cameraParams[i].matrix[5];

            memcpy( cameraParams[i].rotMatr, rotMatr, 9 * sizeof(rotMatr[0]));
            memcpy( cameraParams[i].transVect, transVect, 3 * sizeof(transVect[0]));

            mat.data.ptr = (uchar*)(cameraParams + i);

            /* check resultant camera parameters: if there are some INF's or NAN's,
               stop and reset results */
            if( !cvCheckArr( &mat, CV_CHECK_RANGE | CV_CHECK_QUIET, -10000, 10000 ))
                break;
        }



        isCalibrated = i == cameraCount;

        {/* calibrate stereo cameras */
            if( cameraCount == 2 )
            {
                stereo.camera[0] = &cameraParams[0];
                stereo.camera[1] = &cameraParams[1];

                icvStereoCalibration( framesAccepted, counts,
                                   imgSize,
                                   points[0],points[1],
                                   buffer,
                                   &stereo);


            }

        }

        cvFree( &buffer );
        cvFree( &counts );
        cvFree( &rotMatr );
        cvFree( &transVect );
    }

    framesAccepted = 0;
}
Code example #22
File: FloorModel.cpp Project: githubbar/NewVision
// ------------------------------------------------------------------------
 CvPoint3D32f FloorModel::coordsFloor2Real(CvPoint p) {
	return cvPoint3D32f(
		floormin.x+p.x*(floormax.x-floormin.x)/w,
		floormin.y+p.y*(floormax.y-floormin.y)/h,
		0);
}