Example #1
void KLTWrapper::MakeHomoGraphy(int *pnMatch, int nCnt)
{
	double h[9];
	CvMat _h = cvMat(3, 3, CV_64F, h);
	std::vector<CvPoint2D32f> pt1, pt2;
	CvMat _pt1, _pt2;
	int i;

	pt1.resize(nCnt);
	pt2.resize(nCnt);
	for (i = 0; i < nCnt; i++) {
		//REVERSE HOMOGRAPHY
		pt1[i] = points[1][pnMatch[i]];
		pt2[i] = points[0][pnMatch[i]];
	}

	_pt1 = cvMat(1, nCnt, CV_32FC2, &pt1[0]);
	_pt2 = cvMat(1, nCnt, CV_32FC2, &pt2[0]);
	if (!cvFindHomography(&_pt1, &_pt2, &_h, CV_RANSAC, 1))
//      if(!cvFindHomography( &_pt1, &_pt2, &_h, CV_LMEDS, 1))
	{
		return;
	}

	for (i = 0; i < 9; i++) {
		matH[i] = h[i];
	}
}
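A minimal self-contained sketch of the same wrapping pattern (legacy OpenCV C API assumed; the function name and signature are illustrative): point vectors are exposed to cvFindHomography as 1xN CV_32FC2 headers without copying.

    #include <opencv/cv.h>
    #include <vector>

    bool estimateHomography(const std::vector<CvPoint2D32f> &a,
                            const std::vector<CvPoint2D32f> &b,
                            double h[9])
    {
        if (a.size() < 4 || a.size() != b.size())
            return false;
        CvMat H  = cvMat(3, 3, CV_64F, h);
        CvMat p1 = cvMat(1, (int)a.size(), CV_32FC2, (void *)&a[0]);
        CvMat p2 = cvMat(1, (int)b.size(), CV_32FC2, (void *)&b[0]);
        // RANSAC with a 1-pixel reprojection threshold, as in the example
        return cvFindHomography(&p1, &p2, &H, CV_RANSAC, 1) != 0;
    }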
Example #2
CvMat* BackgroundModel::_computeHomography()
{
    int n = 4; // 4 vertices
    CvMat *src = cvCreateMat(n, 2, CV_32FC1);
    CvMat *dst = cvCreateMat(n, 2, CV_32FC1);
    CvMat *homography = cvCreateMat(3, 3, CV_32FC1);

    CV_MAT_ELEM(*src, float, 0, 0) = _vertex[0].x; // topleft
    CV_MAT_ELEM(*src, float, 0, 1) = _vertex[0].y;
    CV_MAT_ELEM(*src, float, 1, 0) = _vertex[1].x; // topright
    CV_MAT_ELEM(*src, float, 1, 1) = _vertex[1].y;
    CV_MAT_ELEM(*src, float, 2, 0) = _vertex[2].x; // bottomright
    CV_MAT_ELEM(*src, float, 2, 1) = _vertex[2].y;
    CV_MAT_ELEM(*src, float, 3, 0) = _vertex[3].x; // bottomleft
    CV_MAT_ELEM(*src, float, 3, 1) = _vertex[3].y;

    CV_MAT_ELEM(*dst, float, 0, 0) = 0.0f; // topleft
    CV_MAT_ELEM(*dst, float, 0, 1) = 0.0f;
    CV_MAT_ELEM(*dst, float, 1, 0) = 1.0f; // topright
    CV_MAT_ELEM(*dst, float, 1, 1) = 0.0f;
    CV_MAT_ELEM(*dst, float, 2, 0) = 1.0f; // bottomright
    CV_MAT_ELEM(*dst, float, 2, 1) = 1.0f;
    CV_MAT_ELEM(*dst, float, 3, 0) = 0.0f; // bottomleft
    CV_MAT_ELEM(*dst, float, 3, 1) = 1.0f;

    cvFindHomography(src, dst, homography);
    cvSave("data/bgmodel/homography.yaml", homography);

    cvReleaseMat(&src);
    cvReleaseMat(&dst);

    return homography;
}
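A hedged usage note: src and dst are released inside the function, but ownership of the returned matrix passes to the caller. A minimal call-site sketch (the model pointer and warp arguments are illustrative):

    CvMat *H = bgModel->_computeHomography();
    // ... use H, e.g. cvWarpPerspective(srcImg, dstImg, H,
    //                     CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll(0));
    cvReleaseMat(&H);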
Example #3
void ofxImageROI::projectToTarget(ofPoint *dstPoints)
{
	CvPoint2D32f cvsrc[4];
	CvPoint2D32f cvdst[4];

	cvsrc[0].x = cornerPoints[0].x;
	cvsrc[0].y = cornerPoints[0].y;
	cvsrc[1].x = cornerPoints[1].x;
	cvsrc[1].y = cornerPoints[1].y;
	cvsrc[2].x = cornerPoints[2].x;
	cvsrc[2].y = cornerPoints[2].y;
	cvsrc[3].x = cornerPoints[3].x;
	cvsrc[3].y = cornerPoints[3].y;

	cvdst[0].x = dstPoints[0].x;
	cvdst[0].y = dstPoints[0].y;
	cvdst[1].x = dstPoints[1].x;
	cvdst[1].y = dstPoints[1].y;
	cvdst[2].x = dstPoints[2].x;
	cvdst[2].y = dstPoints[2].y;
	cvdst[3].x = dstPoints[3].x;
	cvdst[3].y = dstPoints[3].y;

	CvMat* src_mat = cvCreateMat(4, 2, CV_32FC1);
	CvMat* dst_mat = cvCreateMat(4, 2, CV_32FC1);

	cvSetData(src_mat, cvsrc, sizeof(CvPoint2D32f));
	cvSetData(dst_mat, cvdst, sizeof(CvPoint2D32f));

	CvMat * translate = cvCreateMat(3,3,CV_32FC1); // allocate the 3x3 result matrix
    cvFindHomography(src_mat, dst_mat, translate); // compute the homography

    // (re)allocate when the image size has changed
    if (this->bAllocated() == true && inputImg.bAllocated == false) {
        projectedImage.allocate(this->width, this->height, OF_IMAGE_COLOR);
        inputImg.allocate(this->width, this->height);
        outputImg.allocate(this->width, this->height);
    }

    inputImg.setFromPixels(this->getPixelsRef()); // load the image pixels
    cvWarpPerspective(inputImg.getCvImage(), outputImg.getCvImage(), translate, CV_INTER_LINEAR+CV_WARP_FILL_OUTLIERS, cvScalarAll(255)); // perspective projection transform

    // convert the output to ofImage
    projectedImage.setFromPixels(outputImg.getPixels(), outputImg.getWidth(), outputImg.getHeight(), OF_IMAGE_COLOR);

    cvReleaseMat(&translate);
    cvReleaseMat(&src_mat);
    cvReleaseMat(&dst_mat);
}
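One detail worth flagging in the pattern above: cvSetData() only repoints a header at external memory (the third argument is the row stride in bytes; sizeof(CvPoint2D32f) == 8 == two floats, i.e. one point per row of the 4x2 CV_32FC1 matrix). Since cvCreateMat() also allocates a data buffer that cvSetData() then orphans, cvCreateMatHeader() is arguably the better fit; a sketch:

    CvPoint2D32f cvsrc[4]; // filled as above
    CvMat *src_mat = cvCreateMatHeader(4, 2, CV_32FC1); // header only, no data buffer
    cvSetData(src_mat, cvsrc, sizeof(CvPoint2D32f));
    // ... use src_mat, then:
    cvReleaseMat(&src_mat); // frees just the header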
Example #4
/* a rough implementation for object location */
int
locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
                    const CvSeq* imageKeypoints, const CvSeq* imageDescriptors,
                    const CvPoint src_corners[4], CvPoint dst_corners[4] )
{

    double h[9];
    CvMat _h = cvMat(3, 3, CV_64F, h);
    // vector<int> ptpairs;
    // vector<CvPoint2D32f> pt1, pt2;
    struct pairList * ptpairs = 0;
    CvMat _pt1, _pt2;
    int i, n;

    findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );

    n = (int)(ptpairs->currentItems);
    if( n < 4 )
        return 0;

   struct pair * pairA = (struct pair *) malloc(sizeof(struct pair) * n );
   struct pair * pairB = (struct pair *) malloc(sizeof(struct pair) * n );

    /* NOTE: this port left the copy loop commented out, so pairA and pairB are
       never filled from ptpairs before being wrapped in CvMat headers below.
       The matched points must be copied here for the homography estimate to be
       meaningful; also, since struct pair apparently packs two points, a plain
       CvPoint2D32f array per point set would map more cleanly onto a 1xN
       CV_32FC2 header. */
    for( i = 0; i < n; i+=2 )
    {
       //   pairA[i].p1 = ptpairs[i].p1; //  ((CvSURFPoint*)cvGetSeqElem(objectKeypoints,ptpairs[i*2]))->pt;
       //   pairA[i].p2 = ptpairs[i].p2; //  ((CvSURFPoint*)cvGetSeqElem(objectKeypoints,ptpairs[i*2]))->pt;

       //   pairB[i].p1 = ptpairs[i+1].p1; //  ((CvSURFPoint*)cvGetSeqElem(imageKeypoints,ptpairs[i*2+1]))->pt;
       //   pairB[i].p2 = ptpairs[i+1].p2; //  ((CvSURFPoint*)cvGetSeqElem(imageKeypoints,ptpairs[i*2+1]))->pt;
    }

    _pt1 = cvMat(1, n, CV_32FC2, pairA );
    _pt2 = cvMat(1, n, CV_32FC2, pairB );
    if( !cvFindHomography( &_pt1, &_pt2, &_h, CV_RANSAC, 5 , 0 ))
    {
        free(pairA);
        free(pairB);
        return 0;
    }


    for( i = 0; i < 4; i++ )
    {
        double x = src_corners[i].x, y = src_corners[i].y;
        double Z = 1./(h[6]*x + h[7]*y + h[8]);
        double X = (h[0]*x + h[1]*y + h[2])*Z;
        double Y = (h[3]*x + h[4]*y + h[5])*Z;
        dst_corners[i] = cvPoint(cvRound(X), cvRound(Y));
    }

    free(pairA);
    free(pairB);
    return 1;
}
Example #5
/* a rough implementation for object location */
int
locatePlanarObject( const CvSeq* objectKeypoints, const CvSeq* objectDescriptors,
                    const CvSeq* imageKeypoints, const CvSeq* imageDescriptors,
                    const CvPoint src_corners[4], CvPoint dst_corners[4] )
{
    double h[9];
    CvMat _h = cvMat(3, 3, CV_64F, h);
    vector<int> ptpairs;
    vector<CvPoint2D32f> pt1, pt2;
    CvMat _pt1, _pt2;
    int i, n;

#ifdef USE_FLANN
    flannFindPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#else
    findPairs( objectKeypoints, objectDescriptors, imageKeypoints, imageDescriptors, ptpairs );
#endif

    n = ptpairs.size()/2;
    if( n < 4 )
        return 0;

    pt1.resize(n);
    pt2.resize(n);
    for( i = 0; i < n; i++ )
    {
        pt1[i] = ((CvSURFPoint*)cvGetSeqElem(objectKeypoints,ptpairs[i*2]))->pt;
        pt2[i] = ((CvSURFPoint*)cvGetSeqElem(imageKeypoints,ptpairs[i*2+1]))->pt;
    }

    _pt1 = cvMat(1, n, CV_32FC2, &pt1[0] );
    _pt2 = cvMat(1, n, CV_32FC2, &pt2[0] );
    if( !cvFindHomography( &_pt1, &_pt2, &_h, CV_RANSAC, 5 ))
        return 0;

    for( i = 0; i < 4; i++ )
    {
        double x = src_corners[i].x, y = src_corners[i].y;
        double Z = 1./(h[6]*x + h[7]*y + h[8]);
        double X = (h[0]*x + h[1]*y + h[2])*Z;
        double Y = (h[3]*x + h[4]*y + h[5])*Z;
        dst_corners[i] = cvPoint(cvRound(X), cvRound(Y));
    }

    return 1;
}
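The corner loop above is the homogeneous mapping (X, Y, W) = H * (x, y, 1) followed by division by W. An equivalent sketch using the library call instead of hand-rolled arithmetic:

    CvPoint2D32f cin[4], cout[4];
    for( i = 0; i < 4; i++ )
        cin[i] = cvPoint2D32f( (float)src_corners[i].x, (float)src_corners[i].y );
    CvMat mi = cvMat( 1, 4, CV_32FC2, cin );
    CvMat mo = cvMat( 1, 4, CV_32FC2, cout );
    cvPerspectiveTransform( &mi, &mo, &_h );
    for( i = 0; i < 4; i++ )
        dst_corners[i] = cvPoint( cvRound(cout[i].x), cvRound(cout[i].y) );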
Example #6
//! Find homography between matched points and translate src_corners to dst_corners
int translateCorners(IpPairVec &matches, const CvPoint src_corners[4], CvPoint dst_corners[4])
{
#ifndef LINUX
  double h[9];
  CvMat _h = cvMat(3, 3, CV_64F, h);
  std::vector<CvPoint2D32f> pt1, pt2;
  CvMat _pt1, _pt2;

  int n = (int)matches.size();
  if( n < 4 ) return 0;

  // Set vectors to correct size
  pt1.resize(n);
  pt2.resize(n);

  // Copy Ipoints from match vector into cvPoint vectors
  for(int i = 0; i < n; i++ )
  {
    pt1[i] = cvPoint2D32f(matches[i].second.x, matches[i].second.y);
    pt2[i] = cvPoint2D32f(matches[i].first.x, matches[i].first.y);
  }
  _pt1 = cvMat(1, n, CV_32FC2, &pt1[0] );
  _pt2 = cvMat(1, n, CV_32FC2, &pt2[0] );

  // Find the homography (transformation) between the two sets of points
  if(!cvFindHomography(&_pt1, &_pt2, &_h, CV_RANSAC, 5))  // this line requires opencv 1.1
    return 0;

  // Translate src_corners to dst_corners using homography
  for(int i = 0; i < 4; i++ )
  {
    double x = src_corners[i].x, y = src_corners[i].y;
    double Z = 1./(h[6]*x + h[7]*y + h[8]);
    double X = (h[0]*x + h[1]*y + h[2])*Z;
    double Y = (h[3]*x + h[4]*y + h[5])*Z;
    dst_corners[i] = cvPoint(cvRound(X), cvRound(Y));
  }
#endif
  return 1;
}
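Like most of the examples here, this one discards RANSAC's per-point verdicts. The C API takes an optional inlier mask as a sixth argument (the OpenCV 1.1+ signature the comment refers to); a sketch against the locals above:

    std::vector<uchar> inlierMask(n);
    CvMat _mask = cvMat(1, n, CV_8U, &inlierMask[0]);
    if(!cvFindHomography(&_pt1, &_pt2, &_h, CV_RANSAC, 5, &_mask))
      return 0;
    // inlierMask[i] != 0 marks match i as a RANSAC inlier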
Example #7
cv::Mat ImageReranker::findHomography( InputArray _points1, InputArray _points2,
                                       int method, double ransacReprojThreshold,
                                       OutputArray _mask )
{
    Mat points1 = _points1.getMat(), points2 = _points2.getMat();
    int npoints = points1.checkVector(2);
    CV_Assert( npoints >= 0 && points2.checkVector(2) == npoints &&
               points1.type() == points2.type());

    Mat H(3, 3, CV_64F);
    CvMat _pt1 = points1, _pt2 = points2;
    CvMat matH = H, c_mask, *p_mask = 0;
    if( _mask.needed() )
    {
        _mask.create(npoints, 1, CV_8U, -1, true);
        p_mask = &(c_mask = _mask.getMat());
    }
    bool ok = cvFindHomography( &_pt1, &_pt2, &matH, method, ransacReprojThreshold, p_mask ) > 0;
    if( !ok )
        H = Scalar(0);
    return H;
}
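A hedged call-site sketch for the wrapper above (the ImageReranker instance and the matched-point vectors are illustrative):

    std::vector<cv::Point2f> queryPts, trainPts; // filled from feature matches
    cv::Mat inlierMask;
    cv::Mat H = reranker.findHomography(queryPts, trainPts, CV_RANSAC, 3.0, inlierMask);
    int inlierCount = cv::countNonZero(inlierMask); // H is all zeros on failure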
Example #8
static Mat _findHomography( const Mat& points1, const Mat& points2,
                            int method, double ransacReprojThreshold,
                            vector<uchar>* mask )
{
    CV_Assert(points1.isContinuous() && points2.isContinuous() &&
              points1.type() == points2.type() &&
              ((points1.rows == 1 && points1.channels() == 2) ||
               points1.cols*points1.channels() == 2) &&
              ((points2.rows == 1 && points2.channels() == 2) ||
               points2.cols*points2.channels() == 2));
    
    Mat H(3, 3, CV_64F);
    CvMat _pt1 = Mat(points1), _pt2 = Mat(points2);
    CvMat matH = H, _mask, *pmask = 0;
    if( mask )
    {
        mask->resize(points1.cols*points1.rows*points1.channels()/2);
        pmask = &(_mask = cvMat(1, (int)mask->size(), CV_8U, (void*)&(*mask)[0]));
    }
    bool ok = cvFindHomography( &_pt1, &_pt2, &matH, method, ransacReprojThreshold, pmask ) > 0;
    if( !ok )
        H = Scalar(0);
    return H;
}
Example #9
File: mymat.cpp Project: SPhoenixx/NAM
void MyMat::Homography(const MyMat& src, const MyMat& dst, const double repro)
{
	cvFindHomography(src.m_data, dst.m_data, m_data, CV_RANSAC, repro);
}
Example #10
File: homo.c Project: nisiyu/video_deblur
void calc_homography(const IplImage *src, IplImage *dst[], CvMat *hom[], int image_num)
{
	CvSize size = cvSize(src->width, src->height);
	IplImage *img_prev = cvCreateImage(size, src->depth, 1); // single-channel images
	IplImage *img_curr = cvCreateImage(size, src->depth, 1);
	cvCvtColor(src, img_prev, CV_BGR2GRAY);
	
	CvPoint2D32f features[MAX_CORNERS];
	CvPoint2D32f features_curr[MAX_CORNERS];
	int corner_count = MAX_CORNERS;
	
	int t1 = clock();
	cvGoodFeaturesToTrack(img_prev, NULL, NULL, features, &corner_count, 0.02, 0.5, NULL, 3, 0, 0.04);
	// the features array is effectively the output of goodFeaturesToTrack
	// quality_level 0.02: minimal eigenvalue for a point to be accepted as a corner
	// min_distance 0.5: corners must be at least that many pixels apart
	st1 += clock()-t1;
	
	t1 = clock();
	cvFindCornerSubPix(img_prev, features, corner_count, cvSize(WIN_SIZE,WIN_SIZE), cvSize(-1, -1), cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 0.03));
	// refine the corners to sub-pixel accuracy
	// search window is (2*WIN_SIZE+1) x (2*WIN_SIZE+1)
	st2 += clock()-t1;
	
	char feature_found[MAX_CORNERS];
	float feature_error[MAX_CORNERS];
	CvPoint2D32f good_src[MAX_CORNERS];
	CvPoint2D32f good_dst[MAX_CORNERS];
	CvSize pyr_size = cvSize(img_prev->width + 8, img_prev->height/3); // buffer size required by cvCalcOpticalFlowPyrLK: (width+8)*height/3 bytes per pyramid
	IplImage *pyr_prev = cvCreateImage(pyr_size, IPL_DEPTH_32F, 1); // pyramid buffers for the two images
	IplImage *pyr_curr = cvCreateImage(pyr_size, IPL_DEPTH_32F, 1);
	
	for (int k = 0; k < image_num; ++k)
	{
		cvCvtColor(dst[k], img_curr, CV_BGR2GRAY);
		t1 = clock();
		cvCalcOpticalFlowPyrLK(img_prev, img_curr, pyr_prev, pyr_curr, features, features_curr, corner_count, cvSize(WIN_SIZE,WIN_SIZE), 5, feature_found, feature_error, cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 0.03), 0);
		// compute pyramidal LK optical flow with 5 levels;
		// tracked positions land in features_curr, and
		// feature_found/feature_error each have corner_count entries
		st3 += clock()-t1;
	
		int good_num = 0;
		for (int i = 0; i < corner_count; ++i)
		{
			if (feature_found[i] != 0 && feature_error[i] < 550)
			// record only the well-tracked features
			{
				good_src[good_num] = features[i];
				good_dst[good_num] = features_curr[i];
				++good_num;
			}
		}
	
		if (good_num >= 4)
		{
			CvMat pt_src = cvMat(1, good_num, CV_32FC2, good_src);
			CvMat pt_dst = cvMat(1, good_num, CV_32FC2, good_dst);
			
			t1 = clock();
			cvFindHomography(&pt_src, &pt_dst, hom[k], CV_RANSAC, 5, NULL);
			st4 += clock()-t1;
		}
		else fprintf(stderr, "Unable to calc homography : %d\n", k);
	}
	cvReleaseImage(&pyr_prev);
	cvReleaseImage(&pyr_curr);
	cvReleaseImage(&img_prev);
	cvReleaseImage(&img_curr);
}
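Since img_prev (and therefore pyr_prev) is identical for every k in the loop above, the previous-image pyramid can be reused after the first iteration via the CV_LKFLOW_PYR_A_READY flag; a sketch of the call with the flags argument wired up:

	int flags = (k == 0) ? 0 : CV_LKFLOW_PYR_A_READY; // pyr_prev already computed
	cvCalcOpticalFlowPyrLK(img_prev, img_curr, pyr_prev, pyr_curr,
	                       features, features_curr, corner_count,
	                       cvSize(WIN_SIZE, WIN_SIZE), 5,
	                       feature_found, feature_error,
	                       cvTermCriteria(CV_TERMCRIT_EPS | CV_TERMCRIT_ITER, 20, 0.03),
	                       flags);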
Example #11
bool
cvFindExtrinsicCameraParams3( const CvMat* obj_points,
                  const CvMat* img_points, const CvMat* A,
                  const CvMat* dist_coeffs,
                  CvMat* r_vec, CvMat* t_vec )
{
    bool fGood = true;

    const int max_iter = 20;
    CvMat *_M = 0, *_Mxy = 0, *_m = 0, *_mn = 0, *_L = 0, *_J = 0;
    
    CV_FUNCNAME( "cvFindExtrinsicCameraParams3" );

    __BEGIN__;

    int i, j, count;
    double a[9], k[4] = { 0, 0, 0, 0 }, R[9], ifx, ify, cx, cy;
    double Mc[3] = {0, 0, 0}, MM[9], U[9], V[9], W[3];
    double JtJ[6*6], JtErr[6], JtJW[6], JtJV[6*6], delta[6], param[6];
    CvPoint3D64f* M = 0;
    CvPoint2D64f *m = 0, *mn = 0;
    CvMat _a = cvMat( 3, 3, CV_64F, a );
    CvMat _R = cvMat( 3, 3, CV_64F, R );
    CvMat _r = cvMat( 3, 1, CV_64F, param );
    CvMat _t = cvMat( 3, 1, CV_64F, param + 3 );
    CvMat _Mc = cvMat( 1, 3, CV_64F, Mc );
    CvMat _MM = cvMat( 3, 3, CV_64F, MM );
    CvMat _U = cvMat( 3, 3, CV_64F, U );
    CvMat _V = cvMat( 3, 3, CV_64F, V );
    CvMat _W = cvMat( 3, 1, CV_64F, W );
    CvMat _JtJ = cvMat( 6, 6, CV_64F, JtJ );
    CvMat _JtErr = cvMat( 6, 1, CV_64F, JtErr );
    CvMat _JtJW = cvMat( 6, 1, CV_64F, JtJW );
    CvMat _JtJV = cvMat( 6, 6, CV_64F, JtJV );
    CvMat _delta = cvMat( 6, 1, CV_64F, delta );
    CvMat _param = cvMat( 6, 1, CV_64F, param );
    CvMat _dpdr, _dpdt;

    if( !CV_IS_MAT(obj_points) || !CV_IS_MAT(img_points) ||
        !CV_IS_MAT(A) || !CV_IS_MAT(r_vec) || !CV_IS_MAT(t_vec) )
        CV_ERROR( CV_StsBadArg, "One of required arguments is not a valid matrix" );

    count = MAX(obj_points->cols, obj_points->rows);
    CV_CALL( _M = cvCreateMat( 1, count, CV_64FC3 ));
    CV_CALL( _Mxy = cvCreateMat( 1, count, CV_64FC2 ));
    CV_CALL( _m = cvCreateMat( 1, count, CV_64FC2 ));
    CV_CALL( _mn = cvCreateMat( 1, count, CV_64FC2 ));
    M = (CvPoint3D64f*)_M->data.db;
    m = (CvPoint2D64f*)_m->data.db;
    mn = (CvPoint2D64f*)_mn->data.db;

    CV_CALL( cvConvertPointsHomogenious( obj_points, _M ));
    CV_CALL( cvConvertPointsHomogenious( img_points, _m ));
    CV_CALL( cvConvert( A, &_a ));

    if( dist_coeffs )
    {
        CvMat _k;
        if( !CV_IS_MAT(dist_coeffs) ||
            CV_MAT_DEPTH(dist_coeffs->type) != CV_64F &&
            CV_MAT_DEPTH(dist_coeffs->type) != CV_32F ||
            dist_coeffs->rows != 1 && dist_coeffs->cols != 1 ||
            dist_coeffs->rows*dist_coeffs->cols*CV_MAT_CN(dist_coeffs->type) != 4 )
            CV_ERROR( CV_StsBadArg,
                "Distortion coefficients must be 1x4 or 4x1 floating-point vector" );

        _k = cvMat( dist_coeffs->rows, dist_coeffs->cols,
                    CV_MAKETYPE(CV_64F,CV_MAT_CN(dist_coeffs->type)), k );
        CV_CALL( cvConvert( dist_coeffs, &_k ));
    }

    if( CV_MAT_DEPTH(r_vec->type) != CV_64F && CV_MAT_DEPTH(r_vec->type) != CV_32F ||
        r_vec->rows != 1 && r_vec->cols != 1 ||
        r_vec->rows*r_vec->cols*CV_MAT_CN(r_vec->type) != 3 )
        CV_ERROR( CV_StsBadArg, "Rotation vector must be 1x3 or 3x1 floating-point vector" );

    if( CV_MAT_DEPTH(t_vec->type) != CV_64F && CV_MAT_DEPTH(t_vec->type) != CV_32F ||
        t_vec->rows != 1 && t_vec->cols != 1 ||
        t_vec->rows*t_vec->cols*CV_MAT_CN(t_vec->type) != 3 )
        CV_ERROR( CV_StsBadArg,
            "Translation vector must be 1x3 or 3x1 floating-point vector" );

    ifx = 1./a[0]; ify = 1./a[4];
    cx = a[2]; cy = a[5];

    // normalize image points
    // (unapply the intrinsic matrix transformation and distortion)
    for( i = 0; i < count; i++ )
    {
        double x = (m[i].x - cx)*ifx, y = (m[i].y - cy)*ify, x0 = x, y0 = y;

        // compensate distortion iteratively
        if( dist_coeffs )
            for( j = 0; j < 5; j++ )
            {
                double r2 = x*x + y*y;
                double icdist = 1./(1 + k[0]*r2 + k[1]*r2*r2);
                double delta_x = 2*k[2]*x*y + k[3]*(r2 + 2*x*x);
                double delta_y = k[2]*(r2 + 2*y*y) + 2*k[3]*x*y;
                x = (x0 - delta_x)*icdist;
                y = (y0 - delta_y)*icdist;
            }
        mn[i].x = x; mn[i].y = y;

        // calc mean(M)
        Mc[0] += M[i].x;
        Mc[1] += M[i].y;
        Mc[2] += M[i].z;
    }

    Mc[0] /= count;
    Mc[1] /= count;
    Mc[2] /= count;

    cvReshape( _M, _M, 1, count );
    cvMulTransposed( _M, &_MM, 1, &_Mc );
    cvSVD( &_MM, &_W, 0, &_V, CV_SVD_MODIFY_A + CV_SVD_V_T );

    // initialize extrinsic parameters
    if( W[2]/W[1] < 1e-3 || count < 4 )
    {
        // a planar structure case (all M's lie in the same plane)
        double tt[3], h[9], h1_norm, h2_norm;
        CvMat* R_transform = &_V;
        CvMat T_transform = cvMat( 3, 1, CV_64F, tt );
        CvMat _H = cvMat( 3, 3, CV_64F, h );
        CvMat _h1, _h2, _h3;

        if( V[2]*V[2] + V[5]*V[5] < 1e-10 )
            cvSetIdentity( R_transform );

        if( cvDet(R_transform) < 0 )
            cvScale( R_transform, R_transform, -1 );

        cvGEMM( R_transform, &_Mc, -1, 0, 0, &T_transform, CV_GEMM_B_T );

        for( i = 0; i < count; i++ )
        {
            const double* Rp = R_transform->data.db;
            const double* Tp = T_transform.data.db;
            const double* src = _M->data.db + i*3;
            double* dst = _Mxy->data.db + i*2;

            dst[0] = Rp[0]*src[0] + Rp[1]*src[1] + Rp[2]*src[2] + Tp[0];
            dst[1] = Rp[3]*src[0] + Rp[4]*src[1] + Rp[5]*src[2] + Tp[1];
        }

        cvFindHomography( _Mxy, _mn, &_H );

        cvGetCol( &_H, &_h1, 0 );
        _h2 = _h1; _h2.data.db++;
        _h3 = _h2; _h3.data.db++;
        h1_norm = sqrt(h[0]*h[0] + h[3]*h[3] + h[6]*h[6]);
        h2_norm = sqrt(h[1]*h[1] + h[4]*h[4] + h[7]*h[7]);

        cvScale( &_h1, &_h1, 1./h1_norm );
        cvScale( &_h2, &_h2, 1./h2_norm );
        cvScale( &_h3, &_t, 2./(h1_norm + h2_norm));
        cvCrossProduct( &_h1, &_h2, &_h3 );
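        /* Planar decomposition: with intrinsics already removed by the point
           normalization above, the model-plane-to-image homography satisfies
           H ~ [r1 r2 t]. The column extraction and scaling recover r1, r2 and
           t up to a common scale, and r3 = r1 x r2 is the cross product just
           computed. The Rodrigues round trip below then projects the stitched
           [r1 r2 r3] onto the nearest true rotation matrix. */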

        cvRodrigues2( &_H, &_r );
        cvRodrigues2( &_r, &_H );
        cvMatMulAdd( &_H, &T_transform, &_t, &_t );
        cvMatMul( &_H, R_transform, &_R );
        cvRodrigues2( &_R, &_r );
    }
    else
    {
        // non-planar structure. Use DLT method
        double* L;
        double LL[12*12], LW[12], LV[12*12], sc;
        CvMat _LL = cvMat( 12, 12, CV_64F, LL );
        CvMat _LW = cvMat( 12, 1, CV_64F, LW );
        CvMat _LV = cvMat( 12, 12, CV_64F, LV );
        CvMat _RRt, _RR, _tt;

        CV_CALL( _L = cvCreateMat( 2*count, 12, CV_64F ));
        L = _L->data.db;

        for( i = 0; i < count; i++, L += 24 )
        {
            double x = -mn[i].x, y = -mn[i].y;
            L[0] = L[16] = M[i].x;
            L[1] = L[17] = M[i].y;
            L[2] = L[18] = M[i].z;
            L[3] = L[19] = 1.;
            L[4] = L[5] = L[6] = L[7] = 0.;
            L[12] = L[13] = L[14] = L[15] = 0.;
            L[8] = x*M[i].x;
            L[9] = x*M[i].y;
            L[10] = x*M[i].z;
            L[11] = x;
            L[20] = y*M[i].x;
            L[21] = y*M[i].y;
            L[22] = y*M[i].z;
            L[23] = y;
        }

        cvMulTransposed( _L, &_LL, 1 );
        cvSVD( &_LL, &_LW, 0, &_LV, CV_SVD_MODIFY_A + CV_SVD_V_T );
        _RRt = cvMat( 3, 4, CV_64F, LV + 11*12 );
        cvGetCols( &_RRt, &_RR, 0, 3 );
        cvGetCol( &_RRt, &_tt, 3 );
        if( cvDet(&_RR) < 0 )
            cvScale( &_RRt, &_RRt, -1 );
        sc = cvNorm(&_RR);
        cvSVD( &_RR, &_W, &_U, &_V, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
        cvGEMM( &_U, &_V, 1, 0, 0, &_R, CV_GEMM_A_T );
        cvScale( &_tt, &_t, cvNorm(&_R)/sc );
        cvRodrigues2( &_R, &_r );
        cvReleaseMat( &_L );
    }

    //
    // Check if new r and t are good
    //
    if ( cvGetReal1D( r_vec, 0 ) ||
         cvGetReal1D( r_vec, 1 ) ||
         cvGetReal1D( r_vec, 2 ) ||
         cvGetReal1D( t_vec, 0 ) ||
         cvGetReal1D( t_vec, 1 ) ||
         cvGetReal1D( t_vec, 2 ) )
    {
        //
        // perform this only when the old r and t exist.
        //
        CvMat * R_inv = cvCreateMat( 3, 3, CV_64FC1 );
        CvMat * r_old = cvCreateMat( 3, 1, CV_64FC1 );
        CvMat * R_old = cvCreateMat( 3, 3, CV_64FC1 );
        CvMat * t_old = cvCreateMat( 3, 1, CV_64FC1 );
        // get new center
        cvInvert( &_R, R_inv );
        double new_center[3];
        CvMat newCenter = cvMat( 3, 1, CV_64FC1, new_center );
        cvMatMul( R_inv, &_t, &newCenter );
        cvScale( &newCenter, &newCenter, -1 );
        // get old center
        cvConvert( r_vec, r_old );
        cvConvert( t_vec, t_old );
        cvRodrigues2( r_old, R_old );
        cvInvert( R_old, R_inv );
        double old_center[3];
        CvMat oldCenter = cvMat( 3, 1, CV_64FC1, old_center );
        cvMatMul( R_inv, t_old, &oldCenter );
        cvScale( &oldCenter, &oldCenter, -1 );
        // get difference
        double diff_center = 0;
        for ( int i = 0; i < 3 ; i ++ )
            diff_center += pow( new_center[i] - old_center[i], 2 );
        diff_center = sqrt( diff_center );
        if ( diff_center > 300 )
        {
            printf("diff_center = %.2f --> set initial r and t as same as  the previous\n", diff_center);
            cvConvert(r_vec, &_r);
            cvConvert(t_vec, &_t);
            fGood = false;
        }
//        else printf("diff_center = %.2f\n", diff_center );
        
        cvReleaseMat( &R_inv );
        cvReleaseMat( &r_old );
        cvReleaseMat( &R_old );
        cvReleaseMat( &t_old );
    }
    
    if ( fGood )
    {
        CV_CALL( _J = cvCreateMat( 2*count, 6, CV_64FC1 ));
        cvGetCols( _J, &_dpdr, 0, 3 );
        cvGetCols( _J, &_dpdt, 3, 6 );

        // refine extrinsic parameters using iterative algorithm
        for( i = 0; i < max_iter; i++ )
        {
            double n1, n2;
            cvReshape( _mn, _mn, 2, 1 );
            cvProjectPoints2( _M, &_r, &_t, &_a, dist_coeffs,
                              _mn, &_dpdr, &_dpdt, 0, 0, 0 );
            cvSub( _m, _mn, _mn );
            cvReshape( _mn, _mn, 1, 2*count );

            cvMulTransposed( _J, &_JtJ, 1 );
            cvGEMM( _J, _mn, 1, 0, 0, &_JtErr, CV_GEMM_A_T );
            cvSVD( &_JtJ, &_JtJW, 0, &_JtJV, CV_SVD_MODIFY_A + CV_SVD_V_T );
            if( JtJW[5]/JtJW[0] < 1e-12 )
                break;
            cvSVBkSb( &_JtJW, &_JtJV, &_JtJV, &_JtErr,
                      &_delta, CV_SVD_U_T + CV_SVD_V_T );
            cvAdd( &_delta, &_param, &_param );
            n1 = cvNorm( &_delta );
            n2 = cvNorm( &_param );
            if( n1/n2 < 1e-10 )
                break;
        }

        _r = cvMat( r_vec->rows, r_vec->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(r_vec->type)), param );
        _t = cvMat( t_vec->rows, t_vec->cols,
            CV_MAKETYPE(CV_64F,CV_MAT_CN(t_vec->type)), param + 3 );

        cvConvert( &_r, r_vec );
        cvConvert( &_t, t_vec );
    }

    __END__;

    cvReleaseMat( &_M );
    cvReleaseMat( &_Mxy );
    cvReleaseMat( &_m );
    cvReleaseMat( &_mn );
    cvReleaseMat( &_L );
    cvReleaseMat( &_J );

    return fGood;
}
Example #12
void
DotTracker::spin()
{
	int count = 0;
	boost::format fmtWindowTitle ("ViSP Dot tracker - [ns: %s]");
    fmtWindowTitle % ros::this_node::getNamespace ();

    vpDisplayX d(image_, image_.getWidth(), image_.getHeight(),
		 fmtWindowTitle.str().c_str());

	ros::Rate loop_rate_tracking(200);
	bool ok = false;
	vpHomogeneousMatrix cMo;
	vpImagePoint point (10, 10);
	while (!ok && ros::ok())
	{
		
		try
		  {
			//ROS_INFO_STREAM("spin once area");
			vpDisplay::display(image_);
			
			for(uint i=0;i<trackers_.size();i++){
				try{
					if(trackers_status_[i] == TRACKING){
						trackers_[i]->track(image_);
						trackers_[i]->display(image_, vpColor::red);
					}
				}catch(...){
					ROS_ERROR_STREAM("failed to track dot " << i);
					trackers_status_[i] = NOT_TRACKING;
				}
				
			}
			
			int N = points_.size();
			
			CvMat * src = cvCreateMat( N, 3, CV_64FC1);
			CvMat * dst = cvCreateMat( N, 3, CV_64FC1);
			
			CvMat * mask= cvCreateMat(1, N, CV_8UC1);
			for(int i = 0; i < N; i++ ){
				//model is src
				src->data.db[i*3] = points_[i].X;
				src->data.db[i*3+1] = points_[i].Y;
				src->data.db[i*3+2] = 1;
				
				//screen is dst
				dst->data.db[i*3] = trackers_[i]->getCog().get_i();
				dst->data.db[i*3+1] = trackers_[i]->getCog().get_j();
				dst->data.db[i*3+2] = 1;
				
				//ROS_INFO_STREAM("trackers_[i]->getCog() =" << trackers_[i]->getCog().get_i() << ", " << trackers_[i]->getCog().get_j());	
				//ROS_INFO_STREAM("points_[i] =" << points_[i].X << ", " << points_[i].Y);	
			}
			
				
			
			//cvFindHomography(src, dst, homography_, CV_LMEDS, 0, mask);
			cvFindHomography(src, dst, homography_, CV_RANSAC, 10, mask);
			
			std_msgs::Float64MultiArray homography_msg;
			
			//homography_msg.layout.set_dim_size(2);
			homography_msg.layout.dim.push_back(std_msgs::MultiArrayDimension());
			homography_msg.layout.dim[0].label  = "height";
			homography_msg.layout.dim[0].size   = 3;
			homography_msg.layout.dim[0].stride = 3*3;
			homography_msg.layout.dim.push_back(std_msgs::MultiArrayDimension());
			homography_msg.layout.dim[1].label  = "width";
			homography_msg.layout.dim[1].size   = 3;
			homography_msg.layout.dim[1].stride = 3;
			
			homography_msg.data.resize(3*3);
			for (int i = 0; i < homography_->rows; i++){
				for (int j = 0; j < homography_->cols; j++){
					float val = (float)cvGetReal2D(homography_, i, j);
					homography_msg.data[j + 3 * i] = val;
				}	
			}
			
			homography_pub_.publish(homography_msg);
			
			/*
			if(count++ % 50 == 0){
					count =0;
					printMat(homography_);
			}*/
			
			//if(count == 49) return;	
						
			for(int i = 0; i < N; i++ ){ 
				//ROS_INFO_STREAM("mask " << i << " = " << mask->data.ptr[i]);	
				
				if(((int)(mask->data.ptr[i])) == 0 || trackers_status_[i] == NOT_TRACKING){
					trackers_status_[i] = NOT_TRACKING;			
					//note we have to transpose
					CvMat * dst2 = cvCreateMat( 3, 1, CV_64FC1); //screen (unkown)
					CvMat * src2 = cvCreateMat( 3, 1, CV_64FC1); //model (known)
					
					src2->data.db[0] = points_[i].X;
					src2->data.db[1] = points_[i].Y;
					src2->data.db[2] = 1.0;
					
					cvMatMul(homography_, src2, dst2);
					
					dst2->data.db[0] /= dst2->data.db[2];
					dst2->data.db[1] /= dst2->data.db[2];
					dst2->data.db[2] /= dst2->data.db[2];
					
					//ROS_INFO_STREAM("trackers_[i]->getCog() =" << trackers_[i]->getCog().get_i() << ", " << trackers_[i]->getCog().get_j());	
					//ROS_INFO_STREAM("for point number: " << i << " model x = " << points_[i].X << " model y = " << points_[i].Y);
					//ROS_INFO_STREAM("setting tracker "<< i << " to x = "<< dst2->data.db[0] << ", y = " << dst2->data.db[1] << "," << dst2->data.db[2]); 	
					
					try{
						//trackers_[i]->initTracking(image_, vpImagePoint(dst2->data.db[0], dst2->data.db[1]) , 15);//dodgy???	
						//trackers_[i]->getCog().set_i(dst2->data.db[0]);
						//trackers_[i]->getCog().set_j(dst2->data.db[1]);
						
						//ROS_INFO_STREAM("setting tracker "<< i << " to x = "<< dst2->data.db[0] << ", y = " << dst2->data.db[1] << "," << dst2->data.db[2]); 	
						
						//clip any that go out of screen bounds (release the temporaries before skipping)
						if(dst2->data.db[0] < 1 || dst2->data.db[1] < 1
						   || dst2->data.db[0] > 478 // I have no idea why x needs to be capped at 480 and not 640
						   || dst2->data.db[1] > 638){
							cvReleaseMat(&src2);
							cvReleaseMat(&dst2);
							continue;
						}
						
						boost::shared_ptr<vpDot2> tracker(new vpDot2());
		
						tracker->setSizePrecision(.1);
						tracker->setEllipsoidShapePrecision(.8);
						tracker->setGrayLevelPrecision(.75);
						
						trackers_[i] = tracker;
						trackers_[i]->initTracking(image_, vpImagePoint(dst2->data.db[0], dst2->data.db[1]) , 15);
						/*
						trackers_[i]->track(image_);
						trackers_[i]->display(image_, vpColor::green);
						*/
						trackers_status_[i] = TRACKING;
					}catch(...){
						ROS_ERROR_STREAM("failed to track dot " << i);
						trackers_status_[i] = NOT_TRACKING;
					}
					
					//trackers_[i]->getCog().set_i(dst2->data.db[0]);
					//trackers_[i]->getCog().set_j(dst2->data.db[1]);
					cvReleaseMat(&src2);
					cvReleaseMat(&dst2);
				}
			}
			cvReleaseMat(&src);
			cvReleaseMat(&dst);
			cvReleaseMat(&mask);
			/*
			ROS_INFO_STREAM("mask=");
			for(int i = 0; i < N; i++ ){
					ROS_INFO_STREAM((int)mask->data.ptr[i]<<" ");
			}
			ROS_INFO("\n");
			for(int i = 0; i < 3; i++ ){
				for(int j = 0; j < 3; j++ ){
				
					ROS_INFO_STREAM(hom_ret->data.db[i + j*3]<<" ");
				}
				ROS_INFO("\n");
			}
			ROS_INFO("\n");
			*/
			vpDisplay::flush(image_);
			ros::spinOnce();
			loop_rate_tracking.sleep();
		  }
		catch(const std::runtime_error& e)
		  {
			ROS_ERROR_STREAM("C failed to initialize: "
					 << e.what() << ", retrying...");
		  }
		catch(const std::string& str)
		  {
			ROS_ERROR_STREAM("B failed to initialize: "
					 << str << ", retrying...");
		  }
		catch(...)
		  {
			ROS_ERROR("A failed to initialize, retrying...");
		  }
	}
}
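The Nx3 CV_64FC1 matrices built above carry an explicit homogeneous coordinate (the C API treats Nx3 single-channel input as homogeneous points). The same data packed as plain Nx2 points, for comparison (a sketch over the same locals):

			CvMat *src2d = cvCreateMat(N, 2, CV_64FC1);
			CvMat *dst2d = cvCreateMat(N, 2, CV_64FC1);
			for (int i = 0; i < N; i++) {
				src2d->data.db[i*2]     = points_[i].X;
				src2d->data.db[i*2 + 1] = points_[i].Y;
				dst2d->data.db[i*2]     = trackers_[i]->getCog().get_i();
				dst2d->data.db[i*2 + 1] = trackers_[i]->getCog().get_j();
			}
			cvFindHomography(src2d, dst2d, homography_, CV_RANSAC, 10, mask);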
Example #13
int ComputeHomographyFromPointCorrespondanceOpenCV ( struct FeatureList * source,
                                                     struct CameraCalibrationData * calibration,
                                                     struct TransformationMatrix * rotation_matrix,
                                                     struct TransformationMatrix * translation_matrix,
                                                     struct TransformationMatrix * rotation_and_translation_matrix,
                                                     struct TransformationMatrix * homography_matrix ,
                                                     unsigned int render_warped_image
                                                     )
{
   if ( source->current_features == 0 ) { return 0; }
   if ( source->current_features < 4) { return 0; }

   int i=0 , res = 1;
   unsigned int points_limit = source->current_features;
   //points_limit  = 4; // If we want just a perspective Transform and not a Homography

   CvMat* srcPoints = cvCreateMat(2,points_limit,CV_32FC1);
   if ( srcPoints != 0 )
   {
    for ( i=0; i<points_limit;  i++ )
     {   cvmSet(srcPoints,0,i,(float) source->list[i].last_x);
         cvmSet(srcPoints,1,i,(float) source->list[i].last_y);
     }
   }

   CvMat* dstPoints = cvCreateMat(2,points_limit,CV_32FC1);
   if ( dstPoints != 0 )
   {
    for ( i=0; i<points_limit; i++ )
     {   cvmSet(dstPoints,0,i,(float) source->list[i].x);
         cvmSet(dstPoints,1,i,(float) source->list[i].y);
     }
   }


   CvMat* status=0;
   CvMat* H =  cvCreateMat(3,3,CV_64F);
   cvZero(H);

    res = cvFindHomography(srcPoints,dstPoints,H,CV_RANSAC,2.5,status);
    //cvPerspectiveTransform(srcPoints,dstPoints, H);


   i=0;
   int mem=0,j=0;
   homography_matrix->rows=3;
   homography_matrix->columns=3;
    for(i=0; i<3; i++)
     { for(j=0; j<3; j++)
       {
         homography_matrix->item[mem++]=cvmGet(H,i,j);
       }
     }


   //  THIS OVERLAYS WARPED IMAGE OF LAST VIEW
 if (render_warped_image)
 {
   IplImage  * image = cvCreateImage( cvSize(320,240), IPL_DEPTH_8U, 3 );
   memcpy(image->imageData , video_register[CALIBRATED_LEFT_EYE].pixels , metrics[RESOLUTION_MEMORY_LIMIT_3BYTE]);
   IplImage  * dstImg = cvCloneImage(image);


   cvWarpPerspective(image, dstImg, H , CV_INTER_CUBIC | CV_WARP_FILL_OUTLIERS, cvScalarAll(0) );
   memcpy( video_register[LAST_RIGHT_OPERATION].pixels , dstImg->imageData , metrics[RESOLUTION_MEMORY_LIMIT_3BYTE]);
   video_register[LAST_RIGHT_OPERATION].time = video_register[CALIBRATED_LEFT_EYE].time;
   cvReleaseImage( &image );
   cvReleaseImage( &dstImg );
 }


   // transformed output image
   CvMat*  intriMat=cvCreateMat(3,3,CV_64F); //cvMat(3,3,CV_64F,calibration->intrinsic_parameters_array);
           cvmSet(intriMat,0,0,calibration->intrinsic_parameters_array[0]); cvmSet(intriMat,0,1,calibration->intrinsic_parameters_array[1]); cvmSet(intriMat,0,2,calibration->intrinsic_parameters_array[2]);
           cvmSet(intriMat,1,0,calibration->intrinsic_parameters_array[3]); cvmSet(intriMat,1,1,calibration->intrinsic_parameters_array[4]); cvmSet(intriMat,1,2,calibration->intrinsic_parameters_array[5]);
           cvmSet(intriMat,2,0,calibration->intrinsic_parameters_array[6]); cvmSet(intriMat,2,1,calibration->intrinsic_parameters_array[7]); cvmSet(intriMat,2,2,calibration->intrinsic_parameters_array[8]);



   CvMat*  homography_decomposition_to_translation_and_rotation = CreateHomographyRotationTranslationMatrix(H,intriMat);


   if ( homography_decomposition_to_translation_and_rotation != 0 )
   {
      ConvertMatrices( rotation_matrix,translation_matrix,rotation_and_translation_matrix , homography_decomposition_to_translation_and_rotation );
   }

   if ( srcPoints != 0 ) { cvReleaseMat(&srcPoints); }
   if ( dstPoints != 0 ) { cvReleaseMat(&dstPoints); }
   if ( H != 0 ) { cvReleaseMat(&H); }
   if ( homography_decomposition_to_translation_and_rotation != 0 ) { cvReleaseMat(&homography_decomposition_to_translation_and_rotation); }
   if ( intriMat != 0 ) { cvReleaseMat(&intriMat); }

   return res;
}
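cvFindHomography accepts several point layouts: 2xN or Nx2 single-channel (as here), plus 1xN two-channel headers over CvPoint2D32f data (as in most of the other examples). The same source points packed the second way (a sketch):

   CvPoint2D32f *pts = (CvPoint2D32f *) malloc(points_limit * sizeof(CvPoint2D32f));
   for (i = 0; i < (int)points_limit; i++)
   {
       pts[i] = cvPoint2D32f(source->list[i].last_x, source->list[i].last_y);
   }
   CvMat srcHeader = cvMat(1, (int)points_limit, CV_32FC2, pts);
   /* ... pass &srcHeader to cvFindHomography, then free(pts) ... */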
Example #14
bool THIS::computeHomography()
{
  double homMat[9];
  CvMat homMatCv;

  memset ( homMat, 0, 9*sizeof ( double ) );
  homMatCv = cvMat ( 3, 3, CV_64F, homMat );

  std::vector<CvPoint2D32f> points1Cv, points2Cv;
  CvMat points1CvMat, points2CvMat;

  int numMatches = m_Matches.size();

  if ( numMatches < 4 )
  {
    return false;
  }

  // Set vectors to correct size
  points1Cv.resize ( numMatches );
  points2Cv.resize ( numMatches );

  // Copy Ipoints from match vector into cvPoint vectors
  std::list<KeyPointMatch>::iterator currentMatch = m_Matches.begin();
  int i = 0;
  while ( currentMatch != m_Matches.end() )
  {
    points1Cv[i] = cvPoint2D32f ( ( *m_KeyPoints1 ) [ currentMatch->index1 ].x,
                                  ( *m_KeyPoints1 ) [ currentMatch->index1 ].y );
    points2Cv[i] = cvPoint2D32f ( ( *m_KeyPoints2 ) [ currentMatch->index2 ].x,
                                  ( *m_KeyPoints2 ) [ currentMatch->index2 ].y );
    currentMatch++;
    i++;
  }

  points1CvMat = cvMat ( 1, numMatches, CV_32FC2, &points1Cv[0] );
  points2CvMat = cvMat ( 1, numMatches, CV_32FC2, &points2Cv[0] );

  // Find the homography (transformation) between the two sets of points

  //0 - regular method using all the point pairs
  //CV_RANSAC - RANSAC-based robust method
  //CV_LMEDS - Least-Median robust method

  int method = 0;
  switch (CV_RANSAC)//Config::getInstance()->getInt( "ObjectRecognition.Homography.iMethod" ))
  {
    case 0 :
      method = 0;
      break;
    case 1 :
      method = CV_RANSAC;
      break;
    case 2 :
      method = CV_LMEDS;
      break;
    default:
      //TRACE_ERROR("Undefined method to find homography"); // TODO use ros
      break;
  }

  m_Success = cvFindHomography( &points2CvMat, &points1CvMat, &homMatCv, method, m_MaxReprojectionError );

//   float n=sqrt(homMat[0]*homMat[0]+homMat[3]*homMat[3]) * sqrt(homMat[1]*homMat[1]+homMat[4]*homMat[4]);
//
//   float det = homMat[0]*homMat[4] - homMat[1]*homMat[3];
//   det /= n;
//   float l = fabs( det );
//
//   if ( l < 0.8 )
//   {
//     m_Success = false;
//   }
//
//   TRACE_INFO( "det: " << det );
//
//   /*
//   float scalar= homMat[0]*homMat[1] + homMat[3]*homMat[4];
//   scalar /= n;
//   */

  m_Homography = Homography( homMat );

  return m_Success;
}
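One caveat in the example above: the switch tests the constant CV_RANSAC itself (value 8 in OpenCV) rather than 0, 1, or 2, so it always falls through to default and method keeps its initial value 0 (plain least squares on all pairs). A sketch of the presumably intended selection, keyed on the commented-out config value:

  int iMethod = 1; // e.g. Config::getInstance()->getInt("ObjectRecognition.Homography.iMethod")
  switch (iMethod)
  {
    case 0: method = 0;         break; // regular method using all point pairs
    case 1: method = CV_RANSAC; break;
    case 2: method = CV_LMEDS;  break;
    default: break;
  }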
Example #15
//--------------------------------------------------------------
void ofxGLWarper::processMatrices(){
	//we set it to the default - 0 translation
	//and 1.0 scale for x y z and w
    myMatrix = ofMatrix4x4(); // default constructor generates identity
	
	//we need our points as opencv points
	//be nice to do this without opencv?
	CvPoint2D32f cvsrc[4];
	CvPoint2D32f cvdst[4];	
	
	//we set the warp coordinates
	//source coordinates as the dimensions of our window
	cvsrc[0].x = x;
	cvsrc[0].y = y;
	cvsrc[1].x = x+width;
	cvsrc[1].y = y;
	cvsrc[2].x = x+width;
	cvsrc[2].y = y+height;
	cvsrc[3].x = x;
	cvsrc[3].y = y+height;			
	
	//corners are in 0.0 - 1.0 range
	//so we scale up so that they are at the window's scale
	for(int i = 0; i < 4; i++){
		//cvdst[i].x = corners[i].x  * (float)width;
		//cvdst[i].y = corners[i].y * (float)height;
        cvdst[i].x = corners[i].x;
		cvdst[i].y = corners[i].y;
	}
	
	//we create a matrix that will store the results
	//from openCV - this is a 3x3 2D matrix that is
	//row ordered
	CvMat * translate = cvCreateMat(3,3,CV_32FC1);
	
	//this is the slightly easier - but supposedly less
	//accurate warping method 
	//cvWarpPerspectiveQMatrix(cvsrc, cvdst, translate); 
	
	
	//for the more accurate method we need to create
	//a couple of matrices that just act as containers
	//to store our points  - the nice thing with this 
	//method is you can give it more than four points!
	
	CvMat* src_mat = cvCreateMat( 4, 2, CV_32FC1 );
	CvMat* dst_mat = cvCreateMat( 4, 2, CV_32FC1 );
	
	//copy our points into the matrixes
	cvSetData( src_mat, cvsrc, sizeof(CvPoint2D32f));
	cvSetData( dst_mat, cvdst, sizeof(CvPoint2D32f));
	
	//figure out the warping!
	//warning - older versions of openCV had a bug
	//in this function.
	cvFindHomography(src_mat, dst_mat, translate);
	
	//get the matrix as a list of floats
	float *matrix = translate->data.fl;
	
	
	//we need to copy these values
	//from the 3x3 2D openCV matrix which is row ordered
	//
	// ie:   [0][1][2] x
	//       [3][4][5] y
	//       [6][7][8] w
	
	//to openGL's 4x4 3D column ordered matrix
	//        x  y  z  w   
	// ie:   [0][3][ ][6]
	//       [1][4][ ][7]
	//		 [ ][ ][ ][ ]
	//       [2][5][ ][8]
	//       

        myMatrix(0,0) = matrix[0];
        myMatrix(1,0) = matrix[1];
        myMatrix(3,0) = matrix[2];

        myMatrix(0,1) = matrix[3];
        myMatrix(1,1) = matrix[4];
        myMatrix(3,1) = matrix[5];

        myMatrix(0,3) = matrix[6];
        myMatrix(1,3) = matrix[7];
        myMatrix(3,3) = matrix[8];

        cvReleaseMat(&translate);
        cvReleaseMat(&src_mat);
        cvReleaseMat(&dst_mat);
}
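A hedged usage sketch (openFrameworks 0.8+ assumed, with access to myMatrix): once processMatrices() has built the 4x4 matrix, the warp is typically applied around drawing code:

	ofPushMatrix();
	ofMultMatrix(myMatrix); // the column-ordered matrix built above
	// ... draw content in unwarped coordinates ...
	ofPopMatrix();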
Example #16
ofMatrix4x4 QuadWarp::getMatrix(ofPoint * srcPoints, ofPoint * dstPoints) {

	//we need our points as opencv points
	CvPoint2D32f cvsrc[4];
	CvPoint2D32f cvdst[4];

	//we set the warp coordinates
	//source coordinates as the dimensions of our window
	cvsrc[0].x = srcPoints[0].x;
	cvsrc[0].y = srcPoints[0].y;
	cvsrc[1].x = srcPoints[1].x;
	cvsrc[1].y = srcPoints[1].y;
	cvsrc[2].x = srcPoints[2].x;
	cvsrc[2].y = srcPoints[2].y;
	cvsrc[3].x = srcPoints[3].x;
	cvsrc[3].y = srcPoints[3].y;

	cvdst[0].x = dstPoints[0].x;
	cvdst[0].y = dstPoints[0].y;
	cvdst[1].x = dstPoints[1].x;
	cvdst[1].y = dstPoints[1].y;
	cvdst[2].x = dstPoints[2].x;
	cvdst[2].y = dstPoints[2].y;
	cvdst[3].x = dstPoints[3].x;
	cvdst[3].y = dstPoints[3].y;

	//we create a matrix that will store the results
	//from openCV - this is a 3x3 2D matrix that is
	//row ordered
	CvMat * translate = cvCreateMat(3,3,CV_32FC1);

	//this is the slightly easier - but supposedly less
	//accurate warping method
	//cvWarpPerspectiveQMatrix(cvsrc, cvdst, translate);


	//for the more accurate method we need to create
	//a couple of matrices that just act as containers
	//to store our points  - the nice thing with this
	//method is you can give it more than four points!

	CvMat* src_mat = cvCreateMat(4, 2, CV_32FC1);
	CvMat* dst_mat = cvCreateMat(4, 2, CV_32FC1);

	//copy our points into the matrixes
	cvSetData(src_mat, cvsrc, sizeof(CvPoint2D32f));
	cvSetData(dst_mat, cvdst, sizeof(CvPoint2D32f));

	//figure out the warping!
	//warning - older versions of openCV had a bug
	//in this function.
	cvFindHomography(src_mat, dst_mat, translate);

	//get the matrix as a list of floats
	float *mat = translate->data.fl;


	//we need to copy these values
	//from the 3x3 2D openCV matrix which is row ordered
	//
	// ie:   [0][1][2] x
	//       [3][4][5] y
	//       [6][7][8] w

	//to openGL's 4x4 3D column ordered matrix
	//        x  y  z  w
	// ie:   [0][3][ ][6]
	//       [1][4][ ][7]
	//		 [ ][ ][ ][ ]
	//       [2][5][ ][8]
	//

    ofMatrix4x4 matrixTemp;
	matrixTemp.getPtr()[0]  = mat[0];
	matrixTemp.getPtr()[4]  = mat[1];
	matrixTemp.getPtr()[12] = mat[2];

	matrixTemp.getPtr()[1]  = mat[3];
	matrixTemp.getPtr()[5]  = mat[4];
	matrixTemp.getPtr()[13] = mat[5];

	matrixTemp.getPtr()[3]  = mat[6];
	matrixTemp.getPtr()[7]  = mat[7];
	matrixTemp.getPtr()[15] = mat[8];

    cvReleaseMat(&translate);
    cvReleaseMat(&src_mat);
    cvReleaseMat(&dst_mat);

    return matrixTemp;
}
Example #17
void processImagePair(const char *file1, const char *file2, CvVideoWriter *out, struct CvMat *currentOrientation) {
  // Load two images and allocate other structures
	IplImage* imgA = cvLoadImage(file1, CV_LOAD_IMAGE_GRAYSCALE);
	IplImage* imgB = cvLoadImage(file2, CV_LOAD_IMAGE_GRAYSCALE);
	IplImage* imgBcolor = cvLoadImage(file2);
 
	CvSize img_sz = cvGetSize( imgA );
	int win_size = 15;
  
	// Get the features for tracking
	IplImage* eig_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
	IplImage* tmp_image = cvCreateImage( img_sz, IPL_DEPTH_32F, 1 );
 
	int corner_count = MAX_CORNERS;
	CvPoint2D32f* cornersA = new CvPoint2D32f[ MAX_CORNERS ];
 
	cvGoodFeaturesToTrack( imgA, eig_image, tmp_image, cornersA, &corner_count,
		0.05, 3.0, 0, 3, 0, 0.04 );
 
  fprintf(stderr, "%s: Corner count = %d\n", file1, corner_count);
 
	cvFindCornerSubPix( imgA, cornersA, corner_count, cvSize( win_size, win_size ),
		cvSize( -1, -1 ), cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 50, 0.03 ) );
 
	// Call Lucas Kanade algorithm
	char features_found[ MAX_CORNERS ];
	float feature_errors[ MAX_CORNERS ];
 
	CvSize pyr_sz = cvSize( imgA->width+8, imgB->height/3 );
 
	IplImage* pyrA = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
	IplImage* pyrB = cvCreateImage( pyr_sz, IPL_DEPTH_32F, 1 );
 
	CvPoint2D32f* cornersB = new CvPoint2D32f[ MAX_CORNERS ];
 
  calcNecessaryImageRotation(imgA);
 
	cvCalcOpticalFlowPyrLK( imgA, imgB, pyrA, pyrB, cornersA, cornersB, corner_count, 
		cvSize( win_size, win_size ), 5, features_found, feature_errors,
		 cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3 ), 0 );
 
   CvMat *transform = cvCreateMat(3,3, CV_32FC1);
   CvMat *invTransform = cvCreateMat(3,3, CV_32FC1);
	// Find a homography based on the gradient
   CvMat cornersAMat = cvMat(1, corner_count, CV_32FC2, cornersA);
   CvMat cornersBMat = cvMat(1, corner_count, CV_32FC2, cornersB);
   cvFindHomography(&cornersAMat, &cornersBMat, transform, CV_RANSAC, 15, NULL);

   cvInvert(transform, invTransform);
   // cvMatMul does not support in-place operation, so accumulate via a temporary
   CvMat *newOrientation = cvCreateMat(3, 3, CV_32FC1);
   cvMatMul(currentOrientation, invTransform, newOrientation);
   cvCopy(newOrientation, currentOrientation);
   cvReleaseMat(&newOrientation);
   // save the translated image
 	 IplImage* trans_image = cvCloneImage(imgBcolor);
   cvWarpPerspective(imgBcolor, trans_image, currentOrientation, CV_INTER_CUBIC+CV_WARP_FILL_OUTLIERS);

   printf("%s:\n", file1);
   PrintMat(currentOrientation);

  // cvSaveImage(out, trans_image);
  cvWriteFrame(out, trans_image);

  cvReleaseImage(&eig_image);
  cvReleaseImage(&tmp_image);  
  cvReleaseImage(&trans_image);
  cvReleaseImage(&imgA);
  cvReleaseImage(&imgB);
  cvReleaseImage(&imgBcolor);
  cvReleaseImage(&pyrA);
  cvReleaseImage(&pyrB);
  
  cvReleaseMat(&transform);
  cvReleaseMat(&invTransform);
  delete [] cornersA;
  delete [] cornersB;
  
  
}
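A hypothetical driver for the routine above (file names, frame rate, size, and codec are illustrative): currentOrientation starts as identity and accumulates the inverted frame-to-frame homographies, so each frame is warped back into the first frame's coordinates.

  CvMat *orientation = cvCreateMat(3, 3, CV_32FC1);
  cvSetIdentity(orientation);
  CvVideoWriter *out = cvCreateVideoWriter("stabilized.avi",
                          CV_FOURCC('M','J','P','G'), 25, cvSize(640, 480), 1);
  processImagePair("frame000.png", "frame001.png", out, orientation);
  processImagePair("frame001.png", "frame002.png", out, orientation);
  cvReleaseVideoWriter(&out);
  cvReleaseMat(&orientation);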