TransformErrFunction::TransformErrFunction(TransformSet * transformSet, int alignTo, int latestImageId, int latestNOnly)
    : transformSet_(transformSet), reprojErrVal_(-HUGE), alignTo_(alignTo), RECalculated_(false), scaleCost_(1.0), minIdToRefine_(latestImageId - latestNOnly)
{
    //Choose some points to project (4 is best I think--necessary+sufficient)
    double pointSquareSideLen = 400; //roughly 4 corners of an image
    pointVec_.push_back(cvPoint2D64f(0, 0));
    pointVec_.push_back(cvPoint2D64f(pointSquareSideLen, 0));
    pointVec_.push_back(cvPoint2D64f(0, pointSquareSideLen));
    pointVec_.push_back(cvPoint2D64f(pointSquareSideLen, pointSquareSideLen));

    //Add all the parameters to the param vector
    for(TransformSet::iterator ppTrans = transformSet_->begin(); ppTrans != transformSet_->end(); ppTrans++)
    {
        int j = ppTrans->first.im1Id();
        int k = ppTrans->first.im2Id();

        if(k == alignTo_ && j != alignTo_) //Refine every transform T_j0 with j != alignTo_
        {
            if(minIdToRefine_ < j)
            {
                Transform * Tj0 = ppTrans->second->transform();
                Tj0->addParams(&indexedParamLocations_);
            }
        }
    }

    //Set scaleCost_ so that costs are around 1 and we get sensible well-conditioned derivatives
    evaluateReprojErr();
    if(reprojErrVal_ > 0.0001) scaleCost_ = 1.0 / reprojErrVal_;
    RECalculated_ = false;
}
Example #2
/**
 *	OptimizeSingle:
 *		Input:
 *			cam1 - the first camera (already optimized)
 *			cam2 - the second camera with its intrinsic matrix initialized
 *			dR - an initial relative 3x3 camera rotation matrix
 *			set1 - SIFT features in the first image
 *			set2 - SIFT features in the second image
 *			aryInlier - the homography inliers
 *
 *		Output:
 *			cam2 - receives the optimized focal length and pose
 */
void OptimizeSingle( const CCamera& cam1, CCamera& cam2,
					double* dR,
					const CFeatureArray& set1,
					const CFeatureArray& set2, 
					const MatchArray& aryInlier )
{
	// Step 1. Initialize the camera pose of cam2

	// cam2's relative rotation to cam1
	CvMat matR = cvMat( 3, 3, CV_64F, dR );

	// cam1's absolute rotation
	double dRod1[3];
	CvMat matRod1 = cvMat( 3, 1, CV_64F, dRod1 );
	cam1.GetPose( dRod1 );

	double dRot1[9];
	CvMat matRot1 = cvMat( 3, 3, CV_64F, dRot1 );
	cvRodrigues2( &matRod1, &matRot1 );

	// compose R and Rot1 to get cam2's initial absolute rotation;
	// write into a separate buffer, since cvMatMul (cvGEMM) does not
	// support an output that aliases one of its inputs
	double dRot2[9];
	CvMat matRot2 = cvMat( 3, 3, CV_64F, dRot2 );
	cvMatMul( &matR, &matRot1, &matRot2 );

	double dRod2[3];
	CvMat matRod2 = cvMat( 3, 1, CV_64F, dRod2 );

	cvRodrigues2( &matRot2, &matRod2 );
	cam2.SetPose( dRod2 );

	// Step 2. Now we can perform bundle adjustment for cam2
	CBundleAdjust ba( 1, BA_ITER );
	ba.SetCamera( &cam2, 0 );

	// set points
	for( int i=0; i<aryInlier.size(); ++i )
	{
		const CFeature* ft1 = set1[ aryInlier[i].first ];
		const CFeature* ft2 = set2[ aryInlier[i].second ];

		double dir[3];
		cam1.GetRayDirectionWS( dir, cvPoint2D64f( ft1->x, ft1->y ) );
		
		// the 3d position: place the point on cam1's viewing ray at a
		// fixed distance (the project-level constant 'radius')
		CvPoint3D64f pt3 = cvPoint3D64f( dir[0]*radius, dir[1]*radius, dir[2]*radius );

		ba.SetPointProjection( pt3, 0, cvPoint2D64f( ft2->x, ft2->y ) );
	}

	ba.DoMotion();

	ba.GetAdjustedCamera( &cam2, 0 );
}
Example #3
void find_ellipse(CvPoint *points, int n_points, CvPoint2D64f *center, CvPoint2D64f *axes)
{
  double *mean = (double*)malloc(2 * sizeof(double));
  double *covariance = (double*)malloc(4 * sizeof(double));

  get_mean_and_covariance_of_points_locations(points, n_points, mean, covariance);

  *center = cvPoint2D64f(mean[0], mean[1]);
  *axes = cvPoint2D64f(sqrt(covariance[0]), sqrt(covariance[3]));

  free(mean);
  free(covariance);
}
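
A minimal driver sketch for the helper above (hypothetical values; it assumes get_mean_and_covariance_of_points_locations from the same project is linked in, and the OpenCV header path may vary by version). The returned center is the mean of the point locations, and the axes are the square roots of the covariance diagonal, i.e. the per-axis standard deviations:

#include <stdio.h>
#include <opencv/cv.h>

int main(void)
{
  CvPoint pts[4] = { {10, 20}, {30, 20}, {10, 40}, {30, 40} };
  CvPoint2D64f center, axes;

  find_ellipse(pts, 4, &center, &axes);

  /* expect center = (20, 30); the axes reflect the spread of the points */
  printf("center=(%.1f, %.1f) axes=(%.2f, %.2f)\n",
         center.x, center.y, axes.x, axes.y);
  return 0;
}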
Example #4
/**
 *	OptimizePair:
 *		Input:
 *			cam1 - the first camera with its intrinsic matrix and pose initialized ([R|T]=[I|0])
 *			cam2 - the second camera with its intrinsic matrix initialized
 *			dR - an initial relative 3x3 camera rotation matrix
 *			set1 - SIFT features in the first image
 *			set2 - SIFT features in the second image
 *			aryInlier - the homography inliers
 *
 *		Output:
 *			cam1 - receives the optimized focal length
 *			cam2 - receives the optimized focal length and pose
 */
void OptimizePair( CCamera& cam1, CCamera& cam2,
				  double* dR,
				  const CFeatureArray& set1,
				  const CFeatureArray& set2, 
				  const MatchArray& aryInlier )
{
	CBundleAdjust ba( 2, BA_ITER );

	// Step 1. Initialize cam1 and cam2 as [K][I|0] and [K][R|0]
	//         respectively; R is then refined by bundle adjustment.
	double dRod[3];
	CvMat matRod = cvMat( 3, 1, CV_64F, dRod );
	CvMat matR = cvMat( 3, 3, CV_64F, dR );

	cvRodrigues2( &matR, &matRod );
	cam2.SetPose( dRod );

	// Set cameras
	ba.SetCamera( &cam1, 0 );
	ba.SetCamera( &cam2, 1 );

	// Step 2. We still need to create a set of 3D points. From each homography inlier, 
	//         a 3D point can be initialized by locating it on the ray that goes through 
	//         its projection.	
	for( int i=0; i<aryInlier.size(); ++i )
	{
		const CFeature* ft1 = set1[ aryInlier[i].first ];
		const CFeature* ft2 = set2[ aryInlier[i].second ];

		double dir[3];
		cam1.GetRayDirectionWS( dir, cvPoint2D64f( ft1->x, ft1->y ) );
		
		// the initialized 3d position, placed on the ray at distance 'radius'
		CvPoint3D64f pt3 = cvPoint3D64f( dir[0]*radius, dir[1]*radius, dir[2]*radius );

		// set the 3d point and its projections in both images
		ba.SetPointProjection( pt3, 0, cvPoint2D64f( ft1->x, ft1->y ) );
		ba.SetPointProjection( pt3, 1, cvPoint2D64f( ft2->x, ft2->y ) );
	}

	// perform bundle adjustment
	ba.DoMotionAndStructure();

	// retrieve the optimized cameras
	ba.GetAdjustedCamera( &cam1, 0 );
	ba.GetAdjustedCamera( &cam2, 1 );
}
Example #5
void GetHomographyInliers( MatchArray& aryInlier,
						  const MatchArray& aryMatch,
						  const CFeatureArray& set1,
						  const CFeatureArray& set2,
						  const CHomography& homo,
						  float tol )
{
	float SQR_TOL = tol*tol;

	aryInlier.resize( aryMatch.size() );

	int k=0;
	for( int i=0; i<aryMatch.size(); ++i )
	{
		const CFeature* ft1 = set1[ aryMatch[i].first ];
		const CFeature* ft2 = set2[ aryMatch[i].second ];

		CvPoint2D64f pt = homo * cvPoint2D64f( ft1->x, ft1->y );

		double dx = pt.x - ft2->x;
		double dy = pt.y - ft2->y;

		if( dx*dx +dy*dy < SQR_TOL ) // a homography inlier
			aryInlier[k++] = aryMatch[i];	
	}

	aryInlier.resize(k);
}
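
A hypothetical calling sketch tying the routines above together: filter the raw SIFT matches down to homography inliers (the CHomography 'homo' estimated elsewhere, e.g. by RANSAC), then bundle-adjust the camera pair over those inliers. The cameras, feature sets, match array, and dR are assumed to be set up as in the snippets above:

	MatchArray aryInlier;
	GetHomographyInliers( aryInlier, aryMatch, set1, set2, homo, 3.0f ); // 3-pixel tolerance
	OptimizePair( cam1, cam2, dR, set1, set2, aryInlier );
	// further cameras can then be chained one at a time against an
	// already-optimized neighbour, e.g.:
	// OptimizeSingle( cam2, cam3, dR23, set2, set3, aryInlier23 );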
Example #6
File: AHViewer.cpp Project: uyaponz/AHRTC
void drawMallet(cv::Mat &f, CvPoint2D64f malletXY, double R, CvScalar color)
{
    CvPoint center = convH2CV(malletXY, cvPoint2D64f(HKY_W,HKY_H),
                              scale, cvPoint(ofsX,ofsY));
    int r_ = static_cast<int>((R/2.0) / scale);
    cv::circle(f, center, r_, color, -1);
}
Example #7
File: MyMath.cpp Project: uyaponz/AHRTC
CvPoint2D64f convCV2H(const CvPoint &pos, const CvPoint2D64f &hkyWH,
                      double scaleCV2H, const CvPoint &drawOfsXY)
{
    CvPoint2D64f ret
        = cvPoint2D64f(convCV2H_x(pos.x, hkyWH.x, scaleCV2H, drawOfsXY.x),
                       convCV2H_y(pos.y, hkyWH.y, scaleCV2H, drawOfsXY.y));
    return ret;
}
Example #8
SimilarityTransform::SimilarityTransform()
{
    //**this = cvMat(ROWS, COLS, CV_64FC1, data_);
    //cvSetIdentity(this);
    scale_ = 1.0;
    theta_ = 0.0;
    translation_ = cvPoint2D64f(0,0);
}
Example #9
File: AHViewer.cpp Project: uyaponz/AHRTC
RTC::ReturnCode_t AHViewer::onExecute(RTC::UniqueId ec_id)
{
    m_malletXY_inIn.read();
    m_armXY_inIn.read();
    m_puckXY_inIn.read();

    CvPoint2D64f pMlt = cvPoint2D64f(m_malletXY_in.data[0], m_malletXY_in.data[1]);
    CvPoint2D64f rMlt = cvPoint2D64f(m_armXY_in.data[0],    m_armXY_in.data[1]);
    CvPoint2D64f puck = cvPoint2D64f(m_puckXY_in.data[0],    m_puckXY_in.data[1]);

    frame = cv::Scalar(255,255,255);
    drawHockeyBoard(frame);
    drawMallet(frame, pMlt, PLAYER_MLT_R, cv::Scalar(255,128,0));
    drawMallet(frame, rMlt, ARM_MLT_R,    cv::Scalar(255,0,0));
    drawMallet(frame, puck, PUCK_R,       cv::Scalar(0,0,255));

    cv::imshow(winName, frame);
    cv::waitKey(10);

    return RTC::RTC_OK;
}
Example #10
/*
  Performs a perspective transformation on a single point.  That is, for a
  point (x, y) and a 3 x 3 matrix T this function returns the point
  (u, v), where
  
  [x' y' w']^T = T * [x y 1]^T,
  
  and
  
  (u, v) = (x'/w', y'/w').

  Note that affine transforms are a subset of perspective transforms.
  
  @param pt a 2D point
  @param T a perspective transformation matrix
  
  @return Returns the point (u, v) as above.
*/
CvPoint2D64f persp_xform_pt( CvPoint2D64f pt, CvMat* T )
{
  CvMat XY, UV;
  double xy[3] = { pt.x, pt.y, 1.0 }, uv[3] = { 0 };
  CvPoint2D64f rslt;

  cvInitMatHeader( &XY, 3, 1, CV_64FC1, xy, CV_AUTOSTEP );
  cvInitMatHeader( &UV, 3, 1, CV_64FC1, uv, CV_AUTOSTEP );
  cvMatMul( T, &XY, &UV );
  rslt = cvPoint2D64f( uv[0] / uv[2], uv[1] / uv[2] );

  return rslt;
}
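
A quick sanity check of persp_xform_pt (a minimal sketch): with a pure-translation homography, w' stays 1, so the divide is a no-op and the result is simply the shifted point.

  double tdata[9] = { 1, 0,  5,
                      0, 1, -2,
                      0, 0,  1 };
  CvMat T = cvMat( 3, 3, CV_64FC1, tdata );
  CvPoint2D64f q = persp_xform_pt( cvPoint2D64f( 10, 10 ), &T );
  /* q = (15, 8) */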
Example #11
File: xform.c Project: BITDIP/BITDIP
/* Apply a perspective transform to a single point: given a point pt and a
   perspective transform matrix T, compute the transformed point.
   For a point (x, y) and matrix M, [x', y', w']^T = M * [x, y, 1]^T
   (^T denotes transpose), and the transformed point is (u, v) = (x'/w', y'/w').
   Note: affine transforms are a special case of perspective transforms.
   Parameters:
   pt: a 2D point
   T: the perspective transform matrix
   Return: the point pt after the perspective transform
*/
CvPoint2D64f persp_xform_pt(CvPoint2D64f pt, CvMat* T)
{
	CvMat XY, UV;		// XY: 3x1 column vector for pt; UV: 3x1 column vector for the transformed point
	double xy[3] = { pt.x, pt.y, 1.0 }, uv[3] = { 0 };		// the underlying data
	CvPoint2D64f rslt;		// the result

	// initialize the matrix headers
	cvInitMatHeader(&XY, 3, 1, CV_64FC1, xy, CV_AUTOSTEP);
	cvInitMatHeader(&UV, 3, 1, CV_64FC1, uv, CV_AUTOSTEP);
	cvMatMul(T, &XY, &UV);		// matrix product T*XY, result stored in UV
	rslt = cvPoint2D64f(uv[0] / uv[2], uv[1] / uv[2]);		// the transformed point

	return rslt;
}
Example #12
CvPoint2D64f AffineTransform::applyToPoint(CvPoint2D64f p) const
{
/*    double pointData[COLS], newPointData[ROWS];
    CvMat pointAsMat = cvMat(COLS, 1, CV_64FC1, pointData);
    pointData[0] = p.x; pointData[1] = p.y; pointData[2] = 1.0;
    CvMat transPointAsMat = cvMat(ROWS, 1, CV_64FC1, newPointData);
    cvMatMul(*this, &pointAsMat, &transPointAsMat);
    return cvPoint2D64f(newPointData[0], newPointData[1]);*/

    double x = data_[0]*p.x + data_[1]*p.y + data_[2];
    double y = data_[3]*p.x + data_[4]*p.y + data_[5];

    return cvPoint2D64f(x, y);
}
Example #13
CvPoint2D64f PerspectiveTransform::applyToPoint(CvPoint2D64f p) const
{
/*    double pointData[COLS], newPointData[ROWS];
    CvMat pointAsMat = cvMat(COLS, 1, CV_64FC1, pointData);
    pointData[0] = p.x; pointData[1] = p.y; pointData[2] = 1.0;
    CvMat transPointAsMat = cvMat(ROWS, 1, CV_64FC1, newPointData);
    cvMatMul((CvMat *)this, &pointAsMat, &transPointAsMat);
    double dHomogScale = 1.0/newPointData[2];
    return cvPoint2D64f(dHomogScale*newPointData[0], dHomogScale*newPointData[1]);
*/
    double x = data_[0]*p.x + data_[1]*p.y + data_[2];
    double y = data_[3]*p.x + data_[4]*p.y + data_[5];
    double t = data_[6]*p.x + data_[7]*p.y + data_[8];
    double t_inv = 1.0/t;

    return cvPoint2D64f(t_inv*x, t_inv*y);
}
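
The two applyToPoint overloads differ only in the homogeneous divide. A standalone sketch of the same arithmetic on a raw row-major 3x3 matrix (mirroring the data_ layout assumed above): when the last row is (0, 0, 1), w is always 1 and the perspective version reduces to the affine one.

#include <stdio.h>

/* apply a 3x3 row-major homography h to (px, py), including the divide by w */
static void apply_h(const double h[9], double px, double py,
                    double *qx, double *qy)
{
    double x = h[0]*px + h[1]*py + h[2];
    double y = h[3]*px + h[4]*py + h[5];
    double w = h[6]*px + h[7]*py + h[8];
    *qx = x / w;
    *qy = y / w;
}

int main(void)
{
    double h[9] = { 2, 0, 1,   0, 2, -1,   0, 0, 1 }; /* affine last row */
    double qx, qy;
    apply_h(h, 3.0, 4.0, &qx, &qy);
    printf("(%g, %g)\n", qx, qy); /* prints (7, 7) */
    return 0;
}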
Example #14
SimilarityTransform * AffineTransform::getSimilarityTransform() const
{
    double subMatData[4];
    CvMat subMat = cvMat(2, 2, CV_64FC1, subMatData);
    subMatData[0] = data_[0];
    subMatData[1] = data_[1];
    subMatData[2] = data_[3];
    subMatData[3] = data_[4];
    
    double scale = sqrt(cvDet(&subMat));
    //double theta = acos((*(const Transform *)this)(0,0)/scale); //Todo: do a LS fit?
    double theta = asin((*(const Transform *)this)(0,1)/scale); //Todo: do a LS fit?
    CvPoint2D64f translation = cvPoint2D64f((*(const Transform *)this)(0,2), (*(const Transform *)this)(1,2));
    SimilarityTransform * pm = new SimilarityTransform(theta, scale, translation);

    //for(int x=0; x<2; x++)
    //    for(int y=0; y<2; y++)
    //        m(x,y) = scale_ * cvmGet(*this, x, y);

    return pm;
}
Example #15
void faceDbCreator(const char filePath[50],const char coordsFilename[100],
                                      const int startFile,const int endFile,
                                      const int noIterations,const int border){
  /**Number of Feature Points used in aligning images.**/
  const int noFeaturePoints             =   4;

  const int initialSize                 =   38;

  int i,j,k,iteration;

  /**No of files from DB added for alignment**/
  int noFiles                           =   0;
  double xr                             =   0;
  double yr                             =   0;
  int x,y;
  char filePathCopy[100];
  /**Coords of the standard face with respect to initialSize**/
  CvMat *stdCoords                      =   cvCreateMat(noFeaturePoints*2,1,
                                                                  CV_64FC1);
  double stdCoordsData[]                =   {5+border,6+border,32+border,
                                            6+border,18+border,15+border,
                                                    18+border,25+border};
  stdCoords->data.db                    =   stdCoordsData;

  /**Average Coords of the faces aligned so far**/
  double avgData[noFeaturePoints*2];
  CvMat *avgMat                         =   cvCreateMat(noFeaturePoints*2,1,
                                                                    CV_64FC1);
  avgMat->data.db                       =   avgData;
  /**Coords to which the other coordinates are aligned**/
  double testData[noFeaturePoints*2];
  CvMat *testMat                        =   cvCreateMat(noFeaturePoints*2,1,
                                                                    CV_64FC1);
  testMat->data.db                      =   testData;

  cvCopy(stdCoords,testMat);

  double tempCoords[noFeaturePoints*2];

  /**Coords of all the images in the database**/
  CvMat* coords[endFile-startFile+1];

  double coordsData[endFile-startFile+1][noFeaturePoints*8];

  /**Face DB image file names**/
  char fileNames[endFile-startFile+1][100];
  char tempFileName[100];
  char tempStr[50];

  IplImage *img                         =   NULL;
  IplImage *dst                         =   NULL;

  FILE* coordsFile                      =   fopen(coordsFilename,"r+");
  FILE* t                               =   NULL;

  if (coordsFile){
    for (i=-startFile+1;i<=endFile-startFile;++i){
      if(!feof(coordsFile)){
        fscanf(coordsFile,"%s %lf %lf %lf %lf %lf %lf %lf %lf",tempStr,
                                &tempCoords[0],&tempCoords[1],&tempCoords[2],
                                &tempCoords[3],&tempCoords[4],&tempCoords[5],
                                                &tempCoords[6],&tempCoords[7]);
        /**Skip the coords up to startImage**/
        if (i>=0){
          strcpy(tempFileName,filePath);
          strcat(tempFileName,tempStr);
          /**Check whether the file exists**/
          if ((t = fopen(tempFileName,"r"))){
            fclose(t);
            strcpy(fileNames[noFiles],tempFileName);

            coords[noFiles]             =  cvCreateMat(noFeaturePoints*2,4,
                                                                    CV_64FC1);
            faceDbCreatorFillData(coordsData[noFiles],tempCoords,noFeaturePoints);

            coords[noFiles]->data.db    =   coordsData[noFiles];

            ++noFiles;
          }
        }
      }
      else{
        noFiles                         =   i-1;
        break;
      }
    }
    fclose(coordsFile);

    if (!noFiles){
      printf("Face DB Creator Error: No File To Process\n");
      exit(EXIT_FAILURE);
    }
  }
  else {
    printf("Face DB Creator Error: Could Not Open Coords File\n");
    exit(EXIT_FAILURE);
  }

  /**Pseudoinverse**/
  CvMat *temp2                          =   cvCreateMat(4,1,CV_64FC1);
  double tempData2[4];
  temp2->data.db                        =   tempData2;

  for (iteration=0;iteration<noIterations;++iteration){
    cvSetZero(avgMat);
    for (i=0;i<noFiles;++i){
      pseudoInverse(coords[i],testMat,temp2);
      for (j=0;j<noFeaturePoints;++j){
        xr                              =   coordsData[i][j*8]*temp2->data.db[0]
                                            -coordsData[i][j*8+4]*
                                            temp2->data.db[1]+temp2->data.db[2];

        yr                              =   coordsData[i][j*8]*temp2->data.db[1]
                                            +coordsData[i][j*8+4]*
                                            temp2->data.db[0]+temp2->data.db[3];
        coordsData[i][j*8]              =   xr;
        coordsData[i][j*8+5]            =   xr;
        coordsData[i][j*8+1]            =   -yr;
        coordsData[i][j*8+4]            =   yr;
        avgData[j*2]                    +=  xr;
        avgData[j*2+1]                  +=  yr;
      }

      img                               =   cvLoadImage(fileNames[i],
                                                      CV_LOAD_IMAGE_GRAYSCALE);

      dst                               =   cvCreateImage(cvSize(initialSize+
                                                2*border,initialSize+2*border),
                                                    img->depth,img->nChannels);
      cvSetZero(dst);

      double a                          =   temp2->data.db[0];
      double b                          =   temp2->data.db[1];
      double det                        =   a*a+b*b;
      double tx                         =   temp2->data.db[2];
      double ty                         =   temp2->data.db[3];

      /**Transform the image**/
      for (j=0;j<dst->height;++j){
        for (k=0;k<dst->width;++k){
          xr                            =   ((k-tx)*a+(j-ty)*b)/det;
          yr                            =   ((k-tx)*-b+(j-ty)*a)/det;
          if ((int)xr>=0 && (int)xr <img->width && (int)yr>=0
                                                 && (int)yr<img->height){
            *((unsigned char*)(dst->imageData)+j*dst->widthStep+k)=
                                    *((unsigned char*)(img->imageData)+
                                           (int)yr*img->widthStep+(int)xr);
          }

        }
      }
      cvSaveImage(fileNames[i],dst);
      cvReleaseImage(&img);
      cvReleaseImage(&dst);

    }

    /**Average of the transformations performed so far**/
    for (j=0;j<noFeaturePoints*2;++j){
      avgData[j]                        /=  noFiles; /*only noFiles entries were accumulated*/
    }
    /**Perform transformation on the average data**/
    CvMat* tempMat                      =   cvCreateMat(noFeaturePoints*2,4,
                                                                      CV_64FC1);
    double tempMatData[noFeaturePoints*8];
    tempMat->data.db                    =   tempMatData;
    faceDbCreatorFillData(tempMatData,avgData,noFeaturePoints);

    pseudoInverse(tempMat,stdCoords,temp2);

    for (j=0;j<noFeaturePoints;++j){
      testData[j*2]                     =   avgData[j*2]*temp2->data.db[0]-
                                            avgData[j*2+1]*temp2->data.db[1]+
                                                             temp2->data.db[2];
      testData[j*2+1]                   =   avgData[j*2]*temp2->data.db[1]+
                                            avgData[j*2+1]*temp2->data.db[0]+
                                                             temp2->data.db[3];
    }
    cvReleaseMat(&tempMat);
  }

  IplImage *img8U,*img64F;
  CvRect *cropArea;

  IplImage *finalImage32F               =   cvCreateImage(cvSize(CROPPED_WIDTH,
                                               CROPPED_HEIGHT),IPL_DEPTH_32F,1);
  IplImage *finalImage8U                =   cvCreateImage(cvSize(CROPPED_WIDTH,
                                                CROPPED_HEIGHT),IPL_DEPTH_8U,1);
  IplImage *transformImage64F;
  IplImage *transformImage32F;
  IplImage *croppedImage32F             =   cvCreateImage(cvSize(initialSize,
                                                  initialSize),IPL_DEPTH_32F,1);
  IplImage *croppedImage64F             =   cvCreateImage(cvSize(initialSize,
                                                  initialSize),IPL_DEPTH_64F,1);

  IplImage* mask                        =   cvCreateImage(cvGetSize
                                              (croppedImage64F),IPL_DEPTH_8U,1);
  maskGenerator(mask);

  /**Random transformations**/
  double scale                          =   0;
  double rotate                         =   0;
  double translateX                     =   0;
  double translateY                     =   0;

  tempStr[0]                            =   '_';
  tempStr[4]                            =   '.';
  tempStr[5]                            =   'j';
  tempStr[6]                            =   'p';
  tempStr[7]                            =   'g';
  tempStr[8]                            =   '\0';

  /**Random Number Generator**/
  CvRNG rg;

  for (i=0;i<noFiles;++i){
    img8U                               =   cvLoadImage(fileNames[i],
                                                       CV_LOAD_IMAGE_GRAYSCALE);
    img64F                              =   cvCreateImage(cvGetSize(img8U),
                                                               IPL_DEPTH_64F,1);
    cvConvertScale(img8U,img64F);
    cvReleaseImage(&img8U);

    remove(fileNames[i]);

    xr                                  =   coordsData[i][0]-stdCoordsData[0]+
                                                                         border;
    yr                                  =   coordsData[i][4]-stdCoordsData[1]+
                                                                         border;
    cvSetImageROI(img64F,cvRect(cvRound(xr),cvRound(yr),initialSize,
                                                            initialSize));
    cvCopy(img64F,croppedImage64F);

    /**Creating variations for each image**/
    for (j=0;j<NO_VARIATIONS;++j){
      lightingCorrection(croppedImage64F,mask);
      rg                                =   cvRNG(time(0)*1000*(i+20)*(j+30));

      cvConvertScale(croppedImage64F,croppedImage32F);
      cvResize(croppedImage32F,finalImage32F);
      cvConvertScale(finalImage32F,finalImage8U);
      tempStr[1]                        =   (j/100)%10+48;
      tempStr[2]                        =   (j/10)%10+48;tempStr[3]=j%10+48;

      strncpy(tempFileName,fileNames[i],strlen(fileNames[i])-4);

      tempFileName[strlen(fileNames[i])-4]
                                        ='\0';
      strcat(tempFileName,tempStr);

      cvSaveImage(tempFileName,finalImage8U);
      switch (cvRandInt(&rg)%3){
        /**Scaling**/
        case 0:
          if (cvRandInt(&rg)%2)
            scale                       =   cvRandReal(&rg)*MAX_SCALE*
                                            initialSize/CROPPED_WIDTH;
          else
            scale                       =   cvRandReal(&rg)*MIN_SCALE*
                                            initialSize/CROPPED_HEIGHT;

          transformImage64F             =   cvCreateImage(
                                            cvSize(cvRound(initialSize-2*scale),
                                            cvRound(initialSize-2*scale)),
                                            IPL_DEPTH_64F,1);

          transformImage32F             =   cvCreateImage(
                                            cvSize(cvRound(initialSize-2*scale),
                                            cvRound(initialSize-2*scale)),
                                            IPL_DEPTH_32F,1);

          cvSetImageROI(img64F,cvRect(cvRound(xr+scale),cvRound(yr+scale),
                    cvRound(initialSize-2*scale),cvRound(initialSize-2*scale)));

          cvCopy(img64F,transformImage64F);
          cvConvertScale(transformImage64F,transformImage32F);

          cvResize(transformImage32F,croppedImage32F);
          cvConvertScale(croppedImage32F,croppedImage64F);
          cvReleaseImage(&transformImage64F);
          cvReleaseImage(&transformImage32F);
          break;
        /**Rotation**/
        case 1:
          if (cvRandInt(&rg)%2)
            rotate                      =   cvRandReal(&rg)*MAX_ROTATE;
          else
            rotate                      =   cvRandReal(&rg)*MIN_ROTATE;

          cvResetImageROI(img64F);

          transformImage64F             =   cvCreateImage(cvGetSize(img64F),
                                                            IPL_DEPTH_64F,1);
          CvPoint2D64f rotCenter        =   cvPoint2D64f(xr+initialSize/2,
                                                            yr+initialSize/2);
          transformRotate(img64F,transformImage64F,&rotCenter,
                                                            rotate*M_PI/180);

          cvSetImageROI(transformImage64F,
                            cvRect(xr,yr,initialSize,initialSize));

          cvCopy(transformImage64F,croppedImage64F);
          cvReleaseImage(&transformImage64F);
          break;
        default:
          /**Translation**/
          if (cvRandInt(&rg)%2){
            if (cvRandInt(&rg)%2){
              translateX                =   cvRandReal(&rg)*MAX_TRANSLATE*
                                                    initialSize/CROPPED_WIDTH;
              translateY                =   cvRandReal(&rg)*MAX_TRANSLATE*
                                                    initialSize/CROPPED_HEIGHT;
            }
            else{
              translateX                =   cvRandReal(&rg)*MIN_TRANSLATE*
                                                    initialSize/CROPPED_WIDTH;
              translateY                =   cvRandReal(&rg)*MIN_TRANSLATE*
                                                    initialSize/CROPPED_HEIGHT;
            }
          }
          else{
            if (cvRandInt(&rg)%2){
              translateX                =   cvRandReal(&rg)*MAX_TRANSLATE*
                                                    initialSize/CROPPED_WIDTH;
              translateY                =   cvRandReal(&rg)*MIN_TRANSLATE*
                                                    initialSize/CROPPED_HEIGHT;
            }
            else{
              translateX                =   cvRandReal(&rg)*MIN_TRANSLATE*
                                                    initialSize/CROPPED_WIDTH;
              translateY                =   cvRandReal(&rg)*MAX_TRANSLATE*
                                                    initialSize/CROPPED_HEIGHT;
            }
          }
          cvSetImageROI(img64F,cvRect(cvRound(xr+translateX),
                              cvRound(yr+translateY),initialSize,initialSize));
          cvCopy(img64F,croppedImage64F);
      }
    }
    cvReleaseImage(&img64F);
    cvReleaseMat(&coords[i]);
  }
  cvReleaseImage(&finalImage8U);
  cvReleaseImage(&finalImage32F);
  cvReleaseImage(&croppedImage32F);
  cvReleaseImage(&croppedImage64F);
  cvReleaseMat(&stdCoords);
  cvReleaseMat(&testMat);
  cvReleaseMat(&avgMat);
  cvReleaseMat(&temp2);
}
Example #16
CvPoint2D64f PerspectiveTransform::translation() const
{
    double dHomoScale = 1. / cvmGet(*this, 2,2);
    return cvPoint2D64f(cvmGet(*this, 0, 2)*dHomoScale, cvmGet(*this, 1, 2)*dHomoScale);
};
Example #17
File: AHViewer.cpp Project: uyaponz/AHRTC
void drawHockeyBoard(cv::Mat &f)
{
    {   // outer frame + center line
        CvPoint from = convH2CV(cvPoint2D64f(-HKY_W/2.0, -HKY_H/2.0),
                                cvPoint2D64f(HKY_W, HKY_H),
                                scale, cvPoint(ofsX,ofsY));
        CvPoint to   = convH2CV(cvPoint2D64f(HKY_W/2.0, HKY_H/2.0),
                                cvPoint2D64f(HKY_W, HKY_H),
                                scale, cvPoint(ofsX,ofsY));
        cv::rectangle(f, from, to, cv::Scalar(255,0,255));
        from = convH2CV(cvPoint2D64f(-HKY_W/2.0, 0.0),
                        cvPoint2D64f(HKY_W, HKY_H),
                        scale, cvPoint(ofsX,ofsY));
        to   = convH2CV(cvPoint2D64f(HKY_W/2.0, 0.0),
                        cvPoint2D64f(HKY_W, HKY_H),
                        scale, cvPoint(ofsX,ofsY));
        cv::line(f, from, to, cv::Scalar(255,0,255));
    }

    {   // red lines
        for (int i=0; i<6; i++) { // horizontal lines
            CvPoint from = convH2CV(cvPoint2D64f(-HKY_W/2.0, HKY_H/2.0 - 400.0*i),
                                    cvPoint2D64f(HKY_W, HKY_H),
                                    scale, cvPoint(ofsX,ofsY));
            CvPoint to = convH2CV(cvPoint2D64f(HKY_W/2.0, HKY_H/2.0 - 400.0*i),
                                  cvPoint2D64f(HKY_W, HKY_H),
                                  scale, cvPoint(ofsX,ofsY));
            cv::line(f, from, to, cv::Scalar(0,0,255), 1);
        }
        for (int i=-2; i<=2; i++) { // vertical lines
            CvPoint from = convH2CV(cvPoint2D64f(300.0*i, -HKY_H/2.0),
                                    cvPoint2D64f(HKY_W, HKY_H),
                                    scale, cvPoint(ofsX,ofsY));
            CvPoint to = convH2CV(cvPoint2D64f(300.0*i, HKY_H/2.0),
                                  cvPoint2D64f(HKY_W, HKY_H),
                                  scale, cvPoint(ofsX,ofsY));
            cv::line(f, from, to, cv::Scalar(0,0,255), 1);
        }
    }
}
Example #18
//!Estimate transform from a set of points
void SimilarityTransform::estimateFromPoints(const CvMat * points1, const CvMat * points2)
{
    //const CvMat * temp;
    //CV_SWAP(points1, points2, temp);

/*    AffineTransform * pAT = getAffineTransform();
    pAT->estimateFromPoints(points1, points2);
    delete pAT;*/
    //Umeyama's algorithm:
    //Find mean and s.d.
    int numPoints = points1->cols;
    double meanP1Data[2];
    CvMat meanP1 = cvMat(2, 1, CV_64FC1, meanP1Data);
    double meanP2Data[2];
    CvMat meanP2 = cvMat(2, 1, CV_64FC1, meanP2Data);
    cvSetZero(&meanP1);
    cvSetZero(&meanP2);
    
    for(int i = 0; i<numPoints; i++)
    {
        meanP1Data[0] += cvmGet(points1, 0, i);
        meanP1Data[1] += cvmGet(points1, 1, i);
        meanP2Data[0] += cvmGet(points2, 0, i);
        meanP2Data[1] += cvmGet(points2, 1, i);
    }

    double numPoints_inv = 1.0/numPoints;
    meanP1Data[0] *= numPoints_inv;
    meanP1Data[1] *= numPoints_inv;
    meanP2Data[0] *= numPoints_inv;
    meanP2Data[1] *= numPoints_inv;

    //Now calculate variance
    double varP1, varP2;
    varP1 = 0;
    varP2 = 0;

    double SIGMAData[4];
    CvMat SIGMA = cvMat(2, 2, CV_64FC1, SIGMAData);
    cvSetZero(&SIGMA);

    for(int i = 0; i<numPoints; i++)
    {
        double x1 = cvmGet(points1, 0, i) - meanP1Data[0];
        double y1 = cvmGet(points1, 1, i) - meanP1Data[1];
        double x2 = cvmGet(points2, 0, i) - meanP2Data[0];
        double y2 = cvmGet(points2, 1, i) - meanP2Data[1];

        varP1 += sqr(x1) + sqr(y1);
        varP2 += sqr(x2) + sqr(y2);

        SIGMAData[0] += x1*x2;
        SIGMAData[1] += x1*y2;
        SIGMAData[2] += y1*x2;
        SIGMAData[3] += y1*y2;
    }
    varP1 *= numPoints_inv;
    varP2 *= numPoints_inv;

    SIGMAData[0] *= numPoints_inv;
    SIGMAData[1] *= numPoints_inv;
    SIGMAData[2] *= numPoints_inv;
    SIGMAData[3] *= numPoints_inv;

    double DData[4];
    CvMat D = cvMat(2, 2, CV_64FC1, DData);
    cvSetZero(&D);
    double UData[4];
    CvMat U = cvMat(2, 2, CV_64FC1, UData);
    cvSetZero(&U);
    double VData[4];
    CvMat V = cvMat(2, 2, CV_64FC1, VData);
    cvSetZero(&V);
    double RotationData[4];
    CvMat Rotation = cvMat(2, 2, CV_64FC1, RotationData);
    cvSetZero(&Rotation);

    cvSVD(&SIGMA, &D, &U, &V);
    cvGEMM(&U, &V, 1, 0, 0, &Rotation, CV_GEMM_B_T);

//    theta_ = acos(RotationData[0]);
    theta_ = asin(RotationData[1]);

    scale_ = (1.0/varP1) * cvTrace(&D).val[0];

    double transData[2];
    CvMat trans = cvMat(2, 1, CV_64FC1, transData);
    cvSetZero(&trans);

    cvGEMM( &Rotation, &meanP1, -scale_, &meanP2, 1, &trans);

    translation_ = cvPoint2D64f(transData[0], transData[1]);

    //Applying this to p1 gives us p2
}
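
A minimal driver sketch (hypothetical data): the points are stored as 2xN CV_64FC1 matrices with one point per column, matching the cvmGet indexing above. Recovering a pure translation should leave theta_ near 0, scale_ near 1, and translation_ near (2, 1):

    double p1Data[6] = { 0, 1, 0,    // x row: columns are (0,0), (1,0), (0,1)
                         0, 0, 1 };  // y row
    double p2Data[6] = { 2, 3, 2,    // the same points shifted by (2, 1)
                         1, 1, 2 };
    CvMat p1 = cvMat(2, 3, CV_64FC1, p1Data);
    CvMat p2 = cvMat(2, 3, CV_64FC1, p2Data);

    SimilarityTransform sim;
    sim.estimateFromPoints(&p1, &p2);
    // sim now maps p1 onto p2, i.e. p2 ~ scale_ * R(theta_) * p1 + translation_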
Example #19
CvPoint2D64f AffineTransform::translation() const
{
    return cvPoint2D64f(cvmGet(*this, 0, 2), cvmGet(*this, 1, 2));
};