Code Example #1
File: siftmatch.cpp  Project: githubcjl/uVision_cjl
// Feature point detection
void SiftMatch::on_detectButton_clicked()
{
    img1_Feat = cvCloneImage(img1);//deep copy of image 1, used for drawing its feature points
    img2_Feat = cvCloneImage(img2);//deep copy of image 2, used for drawing its feature points

    //By default, SIFT features are extracted in LOWE format
    //Extract and display the feature points of the first image
    n1 = sift_features( img1, &feat1 );//detect SIFT feature points in image 1; n1 is their count
    export_features("feature1.txt",feat1,n1);//write the feature vectors to a file
    draw_features( img1_Feat, feat1, n1 );//draw the feature points

    //cvNamedWindow(IMG1_FEAT);//create a window
    //cvShowImage(IMG1_FEAT,img1_Feat);//display it

    QString name1_Feat = name1;//output file name: the original name with "_Feat" inserted before the extension
    cvSaveImage(name1_Feat.insert( name1_Feat.lastIndexOf(".",-1) , "_Feat").toAscii().data(),img1_Feat);//save the image

    //Extract and display the feature points of the second image
    n2 = sift_features( img2, &feat2 );//detect SIFT feature points in image 2; n2 is their count
    export_features("feature2.txt",feat2,n2);//write the feature vectors to a file
    draw_features( img2_Feat, feat2, n2 );//draw the feature points

    //cvNamedWindow(IMG2_FEAT);//create a window
    //cvShowImage(IMG2_FEAT,img2_Feat);//display it

    QString name2_Feat = name2;//output file name: the original name with "_Feat" inserted before the extension
    cvSaveImage(name2_Feat.insert( name2_Feat.lastIndexOf(".",-1) , "_Feat").toAscii().data(),img2_Feat);//save the image

    ui->detectButton->setEnabled(false);//disable the feature detection button
    ui->radioButton_horizontal->setEnabled(true);//enable the layout direction radio buttons
    ui->radioButton_vertical->setEnabled(true);
    ui->matchButton->setEnabled(true);//enable the feature matching button
}
Code Example #2
File: main.c  Project: pseudonumos/ImageBasedRenderer
// Feature Thread
void* featureThread(void* featureData)
{
  char file[25];
  int index, numFeatures;
  struct feature* feat;
  struct feature* feat1;

  struct fData* temp;
  temp = (struct fData*) featureData;
  IplImage* img = temp->img;
  index = temp->index;

  sprintf(file, "features/temp%d", index);

  /* only run SIFT if no cached feature file exists for this image */
  if (import_features(file, FEATURE_LOWE, &feat1) == -1) {
    numFeatures = sift_features(img, &feat);
    export_features(file, feat, numFeatures);

    printf("features for image %d:\n", index);
    // print all features
    if (DEBUG) {
      printFeature(feat, numFeatures, 100);
      printf("\n\n");
    }
    free(feat);   /* the array was only needed for the export above */
  } else {
    free(feat1);  /* the cached features were only imported as an existence check */
  }

  return NULL;
}
Code Example #3
File: match_num.c  Project: robwhess/opensift
void* process_image(void* arg) {
  int n;
  struct thread_data* ctx;
  IplImage*       img;

  ctx = (struct thread_data*)arg;
  img = cvLoadImage(ctx->filename, 1);
  if (!img) fatal_error("Unable to load image from %s", ctx->filename);
  ctx->fdata.count = sift_features(img, &(ctx->fdata.features));
  if (DEBUG)
    fprintf(stderr, "Found %d features in %s...\n", ctx->fdata.count, ctx->filename);
  cvReleaseImage(&img);
  pthread_exit(NULL);
}
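For context, process_image() above is written as a pthread worker: each thread loads one image, runs sift_features(), and leaves the feature array and count in its thread_data slot. A minimal driver might look like the sketch below; the struct layout is inferred from the fields the worker touches (filename, fdata.count, fdata.features), and the helper name extract_all is hypothetical. Only pthread and the OpenSIFT calls already used above are assumed.

/* Hypothetical driver for process_image(); assumes <pthread.h>, <stdlib.h>
   and the OpenSIFT headers are included, and that thread_data is laid out
   roughly like this (inferred from the fields the worker uses). */
struct feature_data { int count; struct feature* features; };
struct thread_data  { char* filename; struct feature_data fdata; };

static void extract_all(char** files, int n)
{
  pthread_t* tids = malloc(n * sizeof(pthread_t));
  struct thread_data* ctxs = calloc(n, sizeof(struct thread_data));
  int i;

  for (i = 0; i < n; i++) {
    ctxs[i].filename = files[i];
    pthread_create(&tids[i], NULL, process_image, &ctxs[i]);  /* one worker per image */
  }
  for (i = 0; i < n; i++) {
    pthread_join(tids[i], NULL);        /* wait for the worker to finish */
    /* ctxs[i].fdata now holds the SIFT features for files[i] */
    free(ctxs[i].fdata.features);       /* sift_features() allocates this array */
  }
  free(ctxs);
  free(tids);
}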
Code Example #4
File: match.c  Project: huaijin-chen/SIFT.huaijin
int main( int argc, char** argv )
{
    IplImage* img1, * img2, * stacked;
    struct feature* feat1, * feat2, * feat;
    struct feature** nbrs;
    struct kd_node* kd_root;
    CvPoint pt1, pt2;
    double d0, d1;
    int n1, n2, k, i, m = 0;

    if( argc != 3 )
        fatal_error( "usage: %s <img1> <img2>", argv[0] );

    img1 = cvLoadImage( argv[1], 1 );
    if( ! img1 )
        fatal_error( "unable to load image from %s", argv[1] );
    img2 = cvLoadImage( argv[2], 1 );
    if( ! img2 )
        fatal_error( "unable to load image from %s", argv[2] );
    stacked = stack_imgs( img1, img2 );

    fprintf( stderr, "Finding features in %s...\n", argv[1] );
    n1 = sift_features( img1, &feat1 );
    fprintf( stderr, "Finding features in %s...\n", argv[2] );
    n2 = sift_features( img2, &feat2 );
    kd_root = kdtree_build( feat2, n2 );
    for( i = 0; i < n1; i++ )
    {
        feat = feat1 + i;
        k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS );
        if( k == 2 )
        {
            d0 = descr_dist_sq( feat, nbrs[0] );
            d1 = descr_dist_sq( feat, nbrs[1] );
            if( d0 < d1 * NN_SQ_DIST_RATIO_THR )
            {
                pt1 = cvPoint( cvRound( feat->x ), cvRound( feat->y ) );
                pt2 = cvPoint( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) );
                pt2.y += img1->height;
                cvLine( stacked, pt1, pt2, CV_RGB(255,0,255), 1, 8, 0 );
                m++;
                feat1[i].fwd_match = nbrs[0];
            }
        }
        free( nbrs );
    }

    fprintf( stderr, "Found %d total matches\n", m );
    display_big_img( stacked, "Matches" );
    cvWaitKey( 0 );

    /*
       The RANSAC demo below is enabled.  Note that this line above:

       feat1[i].fwd_match = nbrs[0];

       is important for the RANSAC function to work.
    */
    CvMat* H;
    IplImage* xformed;
    H = ransac_xform( feat1, n1, FEATURE_FWD_MATCH, lsq_homog, 4, 0.01,
                      homog_xfer_err, 3.0, NULL, NULL );
    if( H )
    {
        xformed = cvCreateImage( cvGetSize( img2 ), IPL_DEPTH_8U, 3 );
        cvWarpPerspective( img1, xformed, H,
                           CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS,
                           cvScalarAll( 0 ) );
        cvNamedWindow( "Xformed", 1 );
        cvShowImage( "Xformed", xformed );
        cvWaitKey( 0 );
        cvReleaseImage( &xformed );
        cvReleaseMat( &H );
    }

    cvReleaseImage( &stacked );
    cvReleaseImage( &img1 );
    cvReleaseImage( &img2 );
    kdtree_release( kd_root );
    free( feat1 );
    free( feat2 );
    return 0;
}
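The loop above is the matching pattern that recurs in most of these examples: query the kd-tree built from image 2's descriptors for the two nearest neighbours of each feature in image 1, and keep the match only if it passes Lowe's squared-distance ratio test (d0 < d1 * NN_SQ_DIST_RATIO_THR). As a rough sketch, the test can be factored into a small helper; everything it calls appears in the example, only the helper name ratio_test_match is hypothetical.

/* Hypothetical helper: return the accepted nearest neighbour of feat in the
   kd-tree, or NULL if the best match is not distinctive enough.  ratio_thr
   plays the role of NN_SQ_DIST_RATIO_THR above (a squared-distance ratio). */
static struct feature* ratio_test_match( struct feature* feat,
                                         struct kd_node* kd_root,
                                         double ratio_thr )
{
    struct feature** nbrs;
    struct feature* match = NULL;
    int k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS );

    if( k == 2 )
    {
        double d0 = descr_dist_sq( feat, nbrs[0] );
        double d1 = descr_dist_sq( feat, nbrs[1] );
        if( d0 < d1 * ratio_thr )
            match = nbrs[0];   /* distinctive: accept the nearest neighbour */
    }
    free( nbrs );
    return match;
}

With such a helper, the body of the loop reduces to recording feat1[i].fwd_match = ratio_test_match( feat1 + i, kd_root, NN_SQ_DIST_RATIO_THR ) and counting the non-NULL results, which is also the form the RANSAC step expects.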
Code Example #5
File: test.cpp  Project: bertptrs/uni-mir
int main(int argc, char *argv[])
{
	IplImage* img1, * img2, * stacked1, * stacked2;
	char stemp[1024];

	// printf("Reading images: %s and %s\n",argv[1],argv[2]);
	if(argc != 3) { printf("\n\nUsage: getsift [image1.jpg] [image2.jpg]\n\n"); exit(0); }

	img1=read_jpeg_file(argv[1]);
	img2=read_jpeg_file(argv[2]);
	if( !img1 || !img2 ) { printf("Unable to read the input images\n"); exit(1); }
	stacked1 = stack_imgs( img1, img2 );
	stacked2 = stack_imgs( img1, img2 );
	
	struct feature* feat1, * feat2, * feat;
	struct feature** nbrs;
	struct feature** RANnb = NULL;
	struct kd_node* kd_root;
	CvPoint pt1, pt2;
	double d0, d1;
	int n1, n2, k, i,j, m = 0, n=0;
	
	printf("SIFT Features Extraction: %s\n", argv[1]);
	n1 = sift_features( img1, &feat1 );
	printf("Numbers of Features from %s: %d\n",argv[1], n1);

	printf("SIFT Features Extraction: %s\n", argv[2]);
	n2 = sift_features( img2, &feat2 );
	printf("Numbers of Features from %s: %d\n",argv[2], n2);

        sprintf(stemp,"%s.sift.jpg",argv[1]);
	draw_keypoint(  img1,  feat1, n1 );
	write_jpeg_file(stemp,img1);

        sprintf(stemp,"%s.sift.jpg",argv[2]);
	draw_keypoint(  img2,  feat2, n2 );
	write_jpeg_file(stemp,img2);

	FILE * feat1file;
	FILE * feat2file;

	feat1file=fopen("features1.txt","w+");
	for(i=0;i<n1;i++)
	{
		fprintf(feat1file,"(%lf,%lf): {",(feat1+i)->x,(feat1+i)->y);
		for(j=0;j<FEATURE_MAX_D;j++)
			fprintf(feat1file,"% lf ",(feat1+i)->descr[j]);
		fprintf(feat1file,"}\n");
	}
	fclose(feat1file);
	printf("coordinates and descriptors of %s keypoints have been written to features1.txt\n",argv[1]);

	feat2file=fopen("features2.txt","w+");
	for(i=0;i<n2;i++)
	{
		fprintf(feat2file,"(%lf,%lf): {",(feat2+i)->x,(feat2+i)->y);
		for(j=0;j<FEATURE_MAX_D;j++)
			fprintf(feat2file,"% lf ",(feat2+i)->descr[j]);
		fprintf(feat2file,"}\n");
	}
	fclose(feat2file);
	printf("coordinates and descriptors of %s keypoints have been written to features2.txt\n",argv[2]);

	kd_root = kdtree_build( feat2, n2 );	

	for( i = 0; i < n1; i++ )
	{
		feat = feat1 + i;
		k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS );
		if( k == 2 )
		{
			d0 = descr_dist_sq( feat, nbrs[0] );
			d1 = descr_dist_sq( feat, nbrs[1] );
			if( d0 < d1 * NN_SQ_DIST_RATIO_THR )
			{
				pt1 = cvPoint( cvRound( feat->x ), cvRound( feat->y ) );
				pt2 = cvPoint( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) );
				pt2.y += img1->height;
				cvLine( stacked1, pt1, pt2, CV_RGB(255,0,255), 1, 8, 0 );
				m++;
				feat1[i].fwd_match = nbrs[0];
			}
		}
		free( nbrs );
	}

	printf("Found %d total matches\n", m );
	write_jpeg_file("matches.jpg",stacked1);

  
    CvMat* H;
    int number=0;

    H = ransac_xform( feat1, n1, FEATURE_FWD_MATCH, lsq_homog, 4, 0.25, homog_xfer_err, 27.0, &RANnb, &number );	

   	for( i = 0; i < number; i++ )
	{
		pt1 = cvPoint( cvRound( RANnb[i]->x ), cvRound( RANnb[i]->y ) );
		pt2 = cvPoint( cvRound( RANnb[i]->fwd_match->x ), cvRound( RANnb[i]->fwd_match->y ) );
		pt2.y += img1->height;
		cvLine( stacked2, pt1, pt2, CV_RGB(255,0,255), 1, 8, 0 );
		n++;		
	}

	printf("Found %d total matches after RANSAC\n", n );
	write_jpeg_file("matches.ransac.jpg",stacked2);
	
	cvReleaseMat( &H );
	free( RANnb );
	cvReleaseImage( &stacked1 );
	cvReleaseImage( &stacked2 );
	cvReleaseImage( &img1 );
	cvReleaseImage( &img2 );
	kdtree_release( kd_root );
	free( feat1 );
	free( feat2 );
	return 0;
}
Code Example #6
File: match.cpp  Project: ryanfb/homer
int match( const char * img1fname, const char * img2fname, CvMat **H )
{
	double scale = 10.0;

	IplImage* img1, * img2, * img1orig, * img2orig;

	struct feature* feat1, * feat2, * feat;
	struct feature** nbrs;
	struct kd_node* kd_root;
	CvPoint pt1, pt2;
	double d0, d1;
	int n1, n2, k, i, m = 0;

	img1orig = cvLoadImage( img1fname, CV_LOAD_IMAGE_COLOR );
	if( ! img1orig )
		fatal_error( "unable to load image from %s", img1fname );
	img1 = cvCreateImage( cvSize( (int)(img1orig->width/scale), (int)(img1orig->height/scale) ), img1orig->depth, img1orig->nChannels );
	cvResize( img1orig, img1, CV_INTER_AREA );
	cvReleaseImage( &img1orig );

	img2orig = cvLoadImage( img2fname, CV_LOAD_IMAGE_COLOR );
	if( ! img2orig )
		fatal_error( "unable to load image from %s", img2fname );
	img2 = cvCreateImage( cvSize( (int)(img2orig->width/scale), (int)(img2orig->height/scale) ), img2orig->depth, img2orig->nChannels );
	cvResize( img2orig, img2, CV_INTER_AREA );
	cvReleaseImage( &img2orig );

	fprintf( stdout, "Finding features in %s...\n", img1fname );
	n1 = sift_features( img1, &feat1 );
	fprintf( stdout, "Finding features in %s...\n", img2fname );
	n2 = sift_features( img2, &feat2 );

	cvReleaseImage( &img1 );
	cvReleaseImage( &img2 );

	kd_root = kdtree_build( feat2, n2 );
	for( i = 0; i < n1; i++ ) {
		feat = feat1 + i;
		k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS );
		if( k == 2 ) {
			d0 = descr_dist_sq( feat, nbrs[0] );
			d1 = descr_dist_sq( feat, nbrs[1] );
			if( d0 < d1 * NN_SQ_DIST_RATIO_THR ) {
				/*
				pt1 = cvPoint( cvRound( feat->x ), cvRound( feat->y ) );
				pt2 = cvPoint( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) );
				pt2.y += img1->height;
				*/
				m++;
				feat1[i].fwd_match = nbrs[0];
			}
		}
		free( nbrs );
	}

	
	// scale feature coordinates back into the original image size
	for( i = 0; i < n2; i++ ) {
		feat2[i].img_pt.x *= scale;
		feat2[i].img_pt.y *= scale;
	}
	for( i = 0; i < n1; i++ ) {
		feat1[i].img_pt.x *= scale;
		feat1[i].img_pt.y *= scale;
	}
  
	fprintf( stdout, "Found %d total matches\n", m );
  
	{
    // CvMat* H;
    *H = ransac_xform( feat1, n1, FEATURE_FWD_MATCH, lsq_homog, 4, 0.01,
		      homog_xfer_err, 3.0, NULL, NULL );
		if(*H != NULL) {
			printf("Perspective transform:\n");
			for( i = 0; i < 3; i++ ) {
				printf("%0.6f %0.6f %0.6f\n", (*H)->data.db[3*i+0], (*H)->data.db[3*i+1], (*H)->data.db[3*i+2]);
			}
		}
		else {
			printf("Unable to compute transform\n");
		}
		/*
		CvMat * Hinv = cvCreateMat(3,3,CV_64FC1);
		cvInvert( *H, Hinv, CV_LU );
		printf("Inverse transform:\n");
		for( i = 0; i < 3; i++ ) {
			printf("%0.6f %0.6f %0.6f\n", Hinv->data.db[3*i+0], Hinv->data.db[3*i+1], Hinv->data.db[3*i+2]);
		}
		cvReleaseMat( &Hinv );
		*/

		/*
		if( H ) {
			IplImage* xformed;
			img2orig = cvLoadImage( img2fname, CV_LOAD_IMAGE_COLOR );
			xformed = cvCreateImage( cvGetSize( img2orig ), IPL_DEPTH_8U, 3 );
			cvReleaseImage( &img2orig );
			img1orig = cvLoadImage( img1fname, CV_LOAD_IMAGE_COLOR);
			cvWarpPerspective( img1orig, xformed, H, 
						CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS,
						cvScalarAll( 0 ) );
			cvSaveImage( "xformed.jpg", xformed );
			cvReleaseImage( &img1orig );
			cvReleaseImage( &xformed );
			// cvReleaseMat( &H );
		}
		*/
	}

	kdtree_release( kd_root );
	free( feat1 );
	free( feat2 );
	return 0;
}
Code Example #7
File: stitcher.c  Project: huaijin-chen/SIFT.huaijin
int main( int argc, char** argv )
{

	IplImage* img1, * img2, * stacked;
	struct feature* feat1, * feat2, * feat;
	struct feature** nbrs;
	struct kd_node* kd_root;
	CvPoint pt1, pt2;
	double d0, d1;
	int n1, n2, k, i, m = 0;
	int match_cnt = 0;

	//CvMat *im1_mask = cvCreateMat( img1->height, img1->width, CV_64FC1 );
	//CvMat *im2_mask = cvCreateMat( img2->height, img2->width, CV_64FC1 );
  
	//cvSet( im1_mask, cvScalar( 1, 0, 0, 0 ), NULL );
	//cvSet( im2_mask, cvScalar( 1, 0, 0, 0 ), NULL );

	if( argc != 3 )
		fatal_error( "usage: %s <img1> <img2>", argv[0] );
  
	img1 = cvLoadImage( argv[1], 1 );
	if( ! img1 )
		fatal_error( "unable to load image from %s", argv[1] );
	img2 = cvLoadImage( argv[2], 1 );
	if( ! img2 )
		fatal_error( "unable to load image from %s", argv[2] );
	stacked = stack_imgs( img1, img2 );

	fprintf( stderr, "Finding features in %s...\n", argv[1] );
	n1 = sift_features( img1, &feat1 );
	fprintf( stderr, "Finding features in %s...\n", argv[2] );
	n2 = sift_features( img2, &feat2 );
	kd_root = kdtree_build( feat2, n2 );
	for( i = 0; i < n1; i++ )
    {
		feat = feat1 + i;
		k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS );
		if( k == 2 )
		{
			d0 = descr_dist_sq( feat, nbrs[0] );
			d1 = descr_dist_sq( feat, nbrs[1] );
			if( d0 < d1 * NN_SQ_DIST_RATIO_THR  )
			{
				if( m >= 2000 ) break;
				pt1 = cvPoint( cvRound( feat->x ), cvRound( feat->y ) );
				pt2 = cvPoint( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) );
				pt2.y += img1->height;
				cvLine( stacked, pt1, pt2, CV_RGB(255,0,255), 1, 8, 0 );
				m++;
				feat1[i].fwd_match = nbrs[0];
			 }
		}
		free( nbrs );
    }

	fprintf( stderr, "Found %d total matches\n", m );
	display_big_img( stacked, "Matches" );
	cvWaitKey( 0 );
	/*********************************************************************************************************/
     
	CvMat* H;
	IplImage* xformed;
	H = ransac_xform( feat1, n1, FEATURE_FWD_MATCH, lsq_homog, 4, 0.01, homog_xfer_err, 3.0, NULL, NULL );
	if( !H )
		fatal_error( "RANSAC failed to estimate a homography between %s and %s", argv[1], argv[2] );

	/* output H */
	printf("Homography matrix from ransac_xform:\n");
	printMat( H );

	
	/* get the size of new image  */
	double XDATA[2];
	double YDATA[2];
	CvSize new_size;
	CvSize im1_size = cvGetSize( img1 );
	CvSize im2_size = cvGetSize( img2 );
		
	new_size = get_Stitched_Size( im1_size, im2_size, H, XDATA, YDATA );


	/*declare the mask*/
	CvMat *im1_mask = cvCreateMat( new_size.height, new_size.width, CV_64FC1 );
	CvMat *im2_mask = cvCreateMat( new_size.height, new_size.width, CV_64FC1 );

	CvMat *im1_tempMask = cvCreateMat( im1_size.height, im1_size.width, CV_64FC1 );
	CvMat *im2_tempMask = cvCreateMat( im2_size.height, im2_size.width, CV_64FC1 );
	cvSet( im1_tempMask, cvScalar(1,0,0,0), NULL );
	cvSet( im2_tempMask, cvScalar(1,0,0,0), NULL );

	/*  get the translation matrix for aligning */
	CvMat *T = cvCreateMat( 3, 3, CV_64FC1 );
	cvSetZero( T );	/* cvCreateMat() does not initialize the data */
	double tx = 0;
	double ty = 0;
	if( XDATA[0] < 0 )
	{
		tx = -XDATA[0];
		printf("tx = %f\n", tx );
	}
	if( YDATA[0] < 0 )
	{
		ty = -YDATA[0];
		printf("ty = %f\n", ty );
	}
	cvmSet( T, 0, 0, 1 );
	cvmSet( T, 0, 2, tx );
	cvmSet( T, 1, 1, 1 );
	cvmSet( T, 1, 2, ty );
	cvmSet( T, 2, 2, 1 );
	printf("T Matrix:\n");
	printMat( T );

	/* Compose the translation with the homography (H <- T*H), then warp image1
	   into the stitching plane; a temporary is used so the cvGEMM output does
	   not alias one of its inputs */
	CvMat *TH = cvCreateMat( 3, 3, CV_64FC1 );
	cvGEMM( T, H, 1, NULL, 0, TH, 0 );
	cvCopy( TH, H, NULL );
	cvReleaseMat( &TH );
	printMat( H );
	xformed = cvCreateImage( new_size, IPL_DEPTH_8U, 3 );
	cvWarpPerspective( img1, xformed, H, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
	cvNamedWindow( "Xformed1", 1 );
	cvShowImage( "Xformed1", xformed );
	cvWaitKey( 0 );
	cvDestroyWindow("Xformed1");
	//cvSaveImage("im2.png", xformed);
	cvWarpPerspective( im1_tempMask, im1_mask, H, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
	//cvSaveImage("im1_mask.png", im1_mask);
	cvNamedWindow( "im1_mask", 1 );
	cvShowImage( "im1_mask", im1_mask );
	cvWaitKey( 0 );
	cvDestroyWindow("im1_mask");

	/* Align image2 (translation only) to the stitching plane */
	cvWarpPerspective( im2_tempMask, im2_mask, T, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
	//cvSaveImage( "im2_mask.png", im2_mask );
	cvNamedWindow( "im2_mask", 1 );
	cvShowImage( "im2_mask", im2_mask );
	cvWaitKey( 0 );
	cvDestroyWindow("im2_mask");
	cvSetImageROI( xformed, cvRect( tx, ty, img2->width, img2->height ) );
	cvCopy( img2, xformed, NULL );
	
	IplImage* huaijin = cvCreateImage( new_size, IPL_DEPTH_8U, 3 );
	cvWarpPerspective( img2, huaijin, T, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
	cvNamedWindow( "im12_mask_i", 1 );
	cvShowImage( "im2_mask_i", huaijin);
	cvWaitKey( 0 );
	cvDestroyWindow("im2_mask_i");
	cvResetImageROI( xformed );
	//cvSaveImage( "re.png", xformed );


	/* composite */


	cvNamedWindow( "Xformed", 1 );
	cvShowImage( "Xformed", xformed );
	cvWaitKey( 0 );
	cvDestroyWindow("Xformed");
	/*  */


	cvReleaseImage( &huaijin );
	cvReleaseImage( &xformed );
	cvReleaseMat( &H );
	cvReleaseMat( &im1_tempMask );
	cvReleaseMat( &im2_tempMask );
	cvReleaseMat( &T );
	cvReleaseMat( &im1_mask );
	cvReleaseMat( &im2_mask );
	cvReleaseImage( &stacked );
	cvReleaseImage( &img1 );
	cvReleaseImage( &img2 );
	kdtree_release( kd_root );
	free( feat1 );
	free( feat2 );

		
/****************************************************
 * Using the RANSAC algorithm, estimate the Homography matrix
 * that maps image1 --> image2.
 *
 * n_pts		the number of points used to estimate the parameters
 *
 * Create unique indices of matches : get_randi( int j )
 *
 * ****** solve ***********
 * 1. create the coefficient matrix
 * 2. Ah = 0  -->  decompose A = U D V^T using gsl
 * 3.
 *      [ v19 v29 v39 ... v99 ]
 * h = -------------------------
 *               v99
 *
 * ***************************************************/
	/*
	CvMat *H1 = cvCreateMat( 3, 3, CV_64FC1 );
	CvMat *inliers_mask = cvCreateMat( m, 1, CV_64FC1 );
	RANSAC_Homography( m, pts1, pts2, H1, inliers_mask );
	printf("my code  Homography Matrix is :\n"); 
	for ( i = 0; i < H->rows; i++ ){
		for( k = 0; k < H->cols; k++ ){
			printf("%f	",cvmGet( H1, i, k ));
		}
		printf("\n");
	}


	cvReleaseMat( &H1 );
	cvReleaseMat( &inliers_mask );*/
/***********************************************
 * composite image1 & image2
 * 1) transform image2 to the stitching plane
 *
 * 2) *** stitched image bounds ***
 * W = max( [size(im1,2) size(im1,2)-XDATA(1) size(im2,2) size(im2,2)+XDATA(1)] );
 * H = max( [size(im1,1) size(im1,1)-YDATA(1) size(im2,1) size(im2,1)+YDATA(1)] );
 *
 * 3) *** Align image1 to the bounds ***
 *
 * 4) *** Align image2 to the bounds ***
 *
 * 5) *** Check the size of the bounds ***
 *
 * 6) *** combine both images ***
 *		im1_mask
 *		im2_mask
 *		im1_part_mask
 *		im2_part_mask
 *		com_part_mask
 *		stitched_image
 *
 * 7) *** copy the transformed im2 to the ROI of the stitching plane (just an idea) ***
 *
 ************************************************/

	return 0;
}

/* Stub: composite the two aligned images into the stitching plane (not yet implemented). */
void compositImages( IplImage *im1, IplImage *im2, CvMat *H )
{
	/*  1. create a stitching plane                                                          */
	/*  2. transform im2 & im2_mask                                                          */
	/*  3. estimate the translation T of im2; copy im1 --> plane at cvRect( tx, ty, ... )    */
	/*  4. copy the transformed im2 to the plane using im2_mask (same size as the plane)     */
}
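The alignment step in the stitcher hinges on one idea: get_Stitched_Size() reports, via XDATA/YDATA, how far the warped image extends into negative coordinates, and a 3x3 translation T shifts everything back into the visible stitching plane before being composed with the homography. A small sketch of that step using the same OpenCV C API as above; only the helper name make_align_translation is hypothetical.

/* Hypothetical helper: build the 3x3 translation that shifts negative
   output coordinates (reported by get_Stitched_Size via XDATA/YDATA)
   back into the stitching plane. */
static CvMat* make_align_translation( const double XDATA[2], const double YDATA[2] )
{
	CvMat* T = cvCreateMat( 3, 3, CV_64FC1 );
	double tx = XDATA[0] < 0 ? -XDATA[0] : 0;
	double ty = YDATA[0] < 0 ? -YDATA[0] : 0;

	cvSetZero( T );          /* cvCreateMat() leaves the data uninitialized */
	cvmSet( T, 0, 0, 1 );
	cvmSet( T, 0, 2, tx );   /* shift right by tx */
	cvmSet( T, 1, 1, 1 );
	cvmSet( T, 1, 2, ty );   /* shift down by ty  */
	cvmSet( T, 2, 2, 1 );
	return T;
}

Composing this T with the RANSAC homography (into a separate output matrix, as in the corrected cvGEMM call above) gives the warp that places image1 on the stitching plane, while T alone positions image2.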
Code Example #8
File: main.cpp  Project: fxia22/tinker
int main( int argc, char** argv )
{
	int KDTREE_BBF_MAX_NN_CHKS = 200;
	float NN_SQ_DIST_RATIO_THR = 0.49;
	const CvScalar color = cvScalar( 255, 255, 0 );
	ros::init(argc, argv, "SIFT");
	ros::NodeHandle n;
	ros::Rate rate(33);

	ImageConverter ic;

	while ( !ic.ready )
	{
		ros::spinOnce();
		rate.sleep();
		if ( !ros::ok() )
		{
			printf("terminated by control_c\n");
			return 0;
		}
	}

	string filepath = ros::package::getPath("sift") + "/";

	ifstream fin( (filepath + "store.txt").data(), ios::in); 
	//ifstream fin_main_pic( (filepath + "main_pic.txt").data(), ios::in);
	int pic_num = 5;
	string find;
	//cout << "how many pictures?" << endl;
	//cin >> pic_num;
	//cout << "which picture?" << endl;
	//cin >> find;

	time_t rawtime; 
	struct tm * timeinfo; 
	time ( &rawtime ); 
	timeinfo = localtime ( &rawtime ); 
	printf ( "The current date/time is: %s", asctime (timeinfo) ); 

	char line[1024] = {0}; 
	string* store = new string [pic_num+1];
	string main_pic_name;
	int pic = 0;
	int find_pic = 0;
	while( pic <= pic_num && fin.getline(line, sizeof(line)) )	// bound the loop: store[] has pic_num+1 entries
	{
		stringstream word(line); 
		word >> store[pic];
		store[pic] = filepath + store[pic];
		/*if (store[pic] == find)
		{
			cout << store[pic] << endl;
			find_pic = pic;
		}*/
		pic++;
	}
	//fin_main_pic.getline(line, sizeof(line));
	//stringstream word(line);
	//word >> main_pic_name;
	//fin_main_pic.clear();
	//fin_main_pic.close();
	fin.clear(); 
	fin.close();

	IplImage* img = NULL;
	//IplImage* img1;
	struct feature* features = NULL;//, * features1;

	feature** features_all = new feature*[pic];
	int* features_num = new int[pic];
	for (int i = 0; i < pic; i++ )
		features_num[i] = 0;
	IplImage** img_all = new IplImage*[pic];
	for ( int i = 0; i < pic; i++ )
	{
		//printf ( "Finding features in template picture %d\n", i );
		img_all[i] = cvLoadImage( store[i].data(), 1 );
		features_num[i] = sift_features( img_all[i], &features_all[i] );
		printf ( "%d features in template picture %d\n", features_num[i], i );
		time ( &rawtime ); 
		timeinfo = localtime ( &rawtime ); 
		printf ( "The current date/time is: %s", asctime (timeinfo) );
	}
/*
	printf ( "Finding features in main picture\n" );
	img = cvLoadImage( main_pic_name.data(), 1 );
	int n1 = sift_features( img, &features );
	printf ( "%d features in main picture\n", n1 );
*/
	
	//cvShowImage( "main", img );
	//for (int i = 0; i < n1; i++)
		//cvCircle( img, cvPoint(features[i].x, features[i].y), 5, color, 1, 8, 0 );
	//cvShowImage( "Foundmain", img );


	//cvShowImage( "template", img1 );
	//for (int i = 0; i < n2; i++)
		//cvCircle( img1, cvPoint(features1[i].x, features1[i].y), 5, color, 1, 8, 0 );
	//cvShowImage( "Foundtemplate", img1 );

	bool features_catched = false;
	while ( ros::ok() )
	{
		if ( ic.ready == true )
		{
			ic.ready = false;
			img = &ic.curr_image;	// use the converter's current frame directly ( *img was a write through an uninitialized pointer )
			int n1 = sift_features( img, &features );
			printf ( "%d features in main picture\n", n1 );
			time ( &rawtime ); 
			timeinfo = localtime ( &rawtime ); 
			printf ( "The current date/time is: %s", asctime (timeinfo) );
			features_catched = false;
			for ( int j = 0; j < pic ; j++ )
			{
				IplImage* stacked;
				IplImage* ransac;
				struct feature* feat;
				struct feature** nbrs;
				struct kd_node* kd_root;
				CvPoint pt1, pt2;
				double d0, d1;
				int k, i, m = 0;
				CvMat point1_test;
				CvMat point2_test;
				double point1[3];
				double point2[3] = { 0 };

				stacked = stack_imgs( img, img_all[j] );
				ransac = stack_imgs( img, img_all[j] );

				kd_root = kdtree_build( features_all[j], features_num[j] );
				for( i = 0; i < n1; i++ )
				{
					feat = features + i;
					k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS );
					if( k == 2 )
					{
						d0 = descr_dist_sq( feat, nbrs[0] );
						d1 = descr_dist_sq( feat, nbrs[1] );
						if( d0 < d1 * NN_SQ_DIST_RATIO_THR )
						{
							//pt1 = cvPoint( cvRound( feat->x ), cvRound( feat->y ) );
							//pt2 = cvPoint( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) );
							//pt2.y += img->height;
							//cvCircle( stacked, pt1, 3, cvScalar( (i*10)%255, (i*10)%255, 127 ), 1, 8, 0 );
							//cvCircle( stacked, pt2, 3, cvScalar( (i*10)%255, (i*10)%255, 127 ), 1, 8, 0 );
							//cvLine( stacked, pt1, pt2, cvScalar( (i*10)%255, (i*10)%255, 127 ), 1, 8, 0 );
							m++;
							features[i].fwd_match = nbrs[0];
						}
					}
					free( nbrs );
				}

				double accounts = m * 100 / (double)features_num[j];
				printf( "%d total matches, accounts for %f %%, in pic %d\n", m, accounts, j);
				//cvNamedWindow( "Matches", 1 );
				//cvShowImage( "Matches", stacked );

				time ( &rawtime ); 
				timeinfo = localtime ( &rawtime ); 
				printf ( "The current date/time is: %s", asctime (timeinfo) );	

				CvMat* H = ransac_xform( features, n1, FEATURE_FWD_MATCH, lsq_homog, 4, 0.01, error, 2, NULL, NULL );
			    if( H )
			    {
					for( i = 0; i < n1; i++ )
					{
						feat = features + i;
						k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS );
						if( k == 2 )
						{
							d0 = descr_dist_sq( feat, nbrs[0] );
							d1 = descr_dist_sq( feat, nbrs[1] );
							if( d0 < d1 * NN_SQ_DIST_RATIO_THR )
							{
								pt1 = cvPoint( cvRound( feat->x ), cvRound( feat->y ) );
								pt2 = cvPoint( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) );
								pt2.y += img->height;
								point1[0] = pt1.x;
								point1[1] = pt1.y;
								point1[2] = 1.0;
								cvInitMatHeader( &point1_test, 3, 1, CV_64FC1, point1, CV_AUTOSTEP );
								cvInitMatHeader( &point2_test, 3, 1, CV_64FC1, point2, CV_AUTOSTEP );
								cvMatMul( H, &point1_test, &point2_test );
								/*if ( abs( point2[0]/point2[2]-pt2.x) < 2 && abs( point2[1]/point2[2]+img->height-pt2.y) < 2 )
								{
									cvCircle( ransac, pt1, 3, cvScalar( (i*10)%255, (i*10)%255, 127 ), 1, 8, 0 );
									cvCircle( ransac, pt2, 3, cvScalar( (i*10)%255, (i*10)%255, 127 ), 1, 8, 0 );
									cvLine( ransac, pt1, pt2, cvScalar( (i*10)%255, (i*10)%255, 127 ), 1, 8, 0 );
								}*/
			//					features[i].fwd_match = nbrs[0];
							}
						}
						free( nbrs );
						//printf("features catched, going to exit\n");
					}

					//cvNamedWindow( "Xformed" );
					//cvShowImage( "Xformed", ransac );

					features_catched = true;
					time ( &rawtime ); 
					timeinfo = localtime ( &rawtime ); 
					printf ( "ransac.. The current date/time is: %s", asctime (timeinfo) ); 
			    }
				//cvWaitKey( 0 );
				cvReleaseImage( &ransac );
				cvReleaseMat( &H );
				//cvDestroyWindow( "main" );
				//cvDestroyWindow( "Foundmain" );
				//cvDestroyWindow( "template" );
				//cvDestroyWindow( "Foundtemplate" );
				//cvReleaseImage( &img_all[j] );
				cvReleaseImage( &stacked );
				kdtree_release( kd_root );
			}
			if (!features_catched)
			{
				printf("Sorry, there is no item in the picture\n");
			}
			else
			{
				printf("Item found in the picture!\n");
			}
			free( features );	// sift_features() allocates a new array for every frame
			features = NULL;
		}
		ros::spinOnce();
		rate.sleep();
	}
	//cvReleaseImage( &img );
	free( features );
	for ( int i = 0; i < pic; i++ )
	{
		free( features_all[i] );
	}
	free(features_all);
	return 0;
}