/*
 * cmp_two_image: intended to match Harris descriptors between src1 and src2
 * and draw match lines on a stacked canvas. The matching/drawing path is
 * currently disabled (see the #if 0 region below, which was unreachable dead
 * code after the early return); the function only runs harris_center() on
 * both inputs.
 *
 * Returns NULL always (the drawing path is disabled).
 */
IplImage *cmp_two_image(IplImage *src1, IplImage *src2) {
  float **des1, **des2; /* per-point descriptors (filled only by the disabled harris() calls) */
  int npts1, npts2;     /* number of interest points in each image */
  int ndes;             /* descriptor length */
  t_point *pts1, *pts2; /* interest-point coordinates */
  // IplImage *img1 = harris(src1, 0.01, &des1, &npts1, &ndes, &pts1);
  // IplImage *img2 = harris(src2, 0.01, &des2, &npts2, &ndes, &pts2);
  IplImage *print = stack_imgs(src1, src2);
  harris_center(src1, 0.01);
  harris_center(src2, 0.01);
  /* BUG FIX: the early return below used to leak `print`. */
  cvReleaseImage(&print);
  return NULL;

#if 0
  /* Disabled matching/drawing path. It depends on des1/des2 and pts1/pts2
   * from the commented-out harris() calls above, and `mark` is read
   * uninitialized when npts2 == 0 — both must be fixed before re-enabling. */
  /*print des of img1 and img2*/
#ifdef PRINT
  for (int i = 0; i < npts1; i++) {
    for (int j = 0; j < ndes; j++) {
      cout << des1[i][j] << " ";
    }
    cout << endl;
  }
  cout << "\n";
  for (int i = 0; i < npts2; i++) {
    for (int j = 0; j < ndes; j++) {
      cout << des2[i][j] << " ";
    }
    cout << endl;
  }
  cout << "\n";
#endif
  int mark = 0;
  float mdist = 10000;
  /* brute-force nearest-neighbour matching on L1 descriptor distance */
  for (int i = 0; i < npts1; i++) {
    mdist = 10000;
    for (int j = 0; j < npts2; j++) {
      float dist = 0;
      for (int k = 0; k < ndes; k++) {
        dist += ABS(des1[i][k] - des2[j][k]);
      }
#ifdef PRINT
      cout << dist << " " << i << " " << j << endl;
#endif
      if (dist < mdist) {
        mdist = dist;
        mark = j;
      }
    }
    /* accept the match only below a fixed distance threshold */
    if (mdist < 0.02) {
      cvLine(print, cvPoint(pts1[i].x, pts1[i].y),
             cvPoint(pts2[mark].x + src1->width, pts2[mark].y),
             cvScalar(0, 255, 0), 1);
    }
  }
  show_image(print, "match");
#endif
}
//get from opensift //get from opensift void FlannMatcher::drawInliers(cv::Mat& image1,cv::Mat& image2, vector<cv::KeyPoint>& keypoints1, vector<cv::KeyPoint>& keypoints2, vector<cv::DMatch>& matches, vector<cv::DMatch>& bestMatches) { IplImage* stacked_img=NULL; IplImage test1=IplImage(image1); IplImage test2=IplImage(image2); IplImage* tmp_img1=&test1; IplImage* tmp_img2=&test2; stacked_img=stack_imgs(tmp_img1,tmp_img2); //change c to mat cv::Mat mat_img(stacked_img,true); //deep copy for(unsigned int i=0;i<matches.size();++i) { cv::Point2f point1; cv::Point2f point2; point1=keypoints1[matches[i].queryIdx].pt; point2.x=keypoints2[matches[i].trainIdx].pt.x; point2.y=keypoints2[matches[i].trainIdx].pt.y+image1.rows; cv::line(mat_img,point1,point2,CV_RGB(255,0,0), 1, 8, 0); } for(unsigned int i=0;i<bestMatches.size();++i) { cv::Point2f point1; cv::Point2f point2; point1=keypoints1[bestMatches[i].queryIdx].pt; point2.x=keypoints2[bestMatches[i].trainIdx].pt.x; point2.y=keypoints2[bestMatches[i].trainIdx].pt.y+image1.rows; cv::line(mat_img,point1,point2,CV_RGB(0,255,0), 1, 8, 0); } cv::imshow("ransac inliers",mat_img); }
// Feature matching (slot for the "match" button).
//
// Pipeline:
//  1. stack img1/img2 into one canvas (horizontal or vertical per the UI);
//  2. build a k-d tree over img1's feature set and, for each feature of img2,
//     accept its nearest neighbour when it passes Lowe's distance-ratio test;
//  3. draw and save those matches, then refine them with RANSAC to get the
//     homography H (which always maps img2 points into img1);
//  4. if the inlier statistics indicate img1 is actually the right-hand
//     image, replace H with its inverse and swap img1/img2 so mosaicing
//     still works.
void SiftMatch::on_matchButton_clicked()
{
    // User picked the horizontal-layout radio button
    if(ui->radioButton_horizontal->isChecked())
    {
        // Combine the two images into one: img1 left, img2 right
        stacked = stack_imgs_horizontal(img1, img2);
    }
    else // user picked the vertical-layout radio button
    {
        verticalStackFlag = true; // remember the vertical arrangement
        // Combine the two images into one: img1 on top, img2 below
        stacked = stack_imgs( img1, img2 );
    }

    // Build a k-d tree from image 1's feature set feat1
    kd_root = kdtree_build( feat1, n1 );

    Point pt1,pt2;    // the two endpoints of a match line
    double d0,d1;     // squared distances from a feat2 feature to its nearest / second-nearest neighbour
    int matchNum = 0; // number of matches surviving the distance-ratio test

    // For each feature in feat2, search img1's tree and keep matches passing
    // the ratio test; store each accepted match in the feature's fwd_match field.
    for(int i = 0; i < n2; i++ )
    {
        feat = feat2+i; // pointer to the i-th feature
        // BBF search for feat's 2 nearest neighbours; k = number actually found
        int k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS );
        if( k == 2 )
        {
            d0 = descr_dist_sq( feat, nbrs[0] ); // squared distance to the nearest neighbour
            d1 = descr_dist_sq( feat, nbrs[1] ); // squared distance to the second-nearest
            // Lowe's ratio test: accept only when d0/d1 < NN_SQ_DIST_RATIO_THR
            if( d0 < d1 * NN_SQ_DIST_RATIO_THR )
            {
                pt2 = Point( cvRound( feat->x ), cvRound( feat->y ) );       // point in img2
                pt1 = Point( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) ); // its match in img1
                if(verticalStackFlag) // stacked vertically
                    pt2.y += img1->height; // shift pt2 down by img1's height to land in the canvas
                else
                    pt2.x += img1->width;  // stacked side by side: shift pt2 right by img1's width
                cvLine( stacked, pt1, pt2, CV_RGB(255,0,255), 1, 8, 0 ); // draw the match line
                matchNum++; // count the accepted match
                feat2[i].fwd_match = nbrs[0]; // remember the match for RANSAC below
            }
        }
        free( nbrs ); // release the neighbour array
    }
    qDebug()<<tr("经距离比值法筛选后的匹配点对个数:")<<matchNum<<endl;

    // Display of the ratio-test match image (disabled)
    //cvNamedWindow(IMG_MATCH1);
    //cvShowImage(IMG_MATCH1,stacked);

    // Save the ratio-test match image. The file name is derived from name1 by
    // replacing the character just before the last '.' with "_match_DistRatio".
    QString name_match_DistRatio = name1;
    cvSaveImage(name_match_DistRatio.replace( name_match_DistRatio.lastIndexOf(".",-1)-1 , 1 , "_match_DistRatio").toAscii().data(),stacked);

    // RANSAC refinement: estimate homography H from the fwd_match pairs.
    // Regardless of display order, H maps feat2's points (img2) onto img1.
    H = ransac_xform(feat2,n2,FEATURE_FWD_MATCH,lsq_homog,4,0.01,homog_xfer_err,3.0,&inliers,&n_inliers);

    // H computed successfully, i.e. the two images share a common region
    if( H )
    {
        qDebug()<<tr("经RANSAC算法筛选后的匹配点对个数:")<<n_inliers<<endl;

        // // dump the H matrix
        // for(int i=0;i<3;i++)
        //     qDebug()<<cvmGet(H,i,0)<<cvmGet(H,i,1)<<cvmGet(H,i,2);

        if(verticalStackFlag) // img1 on top, img2 below
            stacked_ransac = stack_imgs( img1, img2 ); // canvas for the RANSAC-filtered matches
        else // img1 left, img2 right
            stacked_ransac = stack_imgs_horizontal(img1, img2);

        // Bounding box of the matched region (disabled)
        //img1LeftBound = inliers[0]->fwd_match->x;
        //img1RightBound = img1LeftBound;
        //img2LeftBound = inliers[0]->x;
        //img2RightBound = img2LeftBound;

        // Counts inliers with pt2.x > pt1.x; used below to decide whether
        // img1 is in fact the right-hand image.
        int invertNum = 0;

        // Draw a line for every RANSAC inlier
        for(int i=0; i<n_inliers; i++)
        {
            feat = inliers[i]; // i-th inlier feature
            pt2 = Point(cvRound(feat->x), cvRound(feat->y));                       // point in img2
            pt1 = Point(cvRound(feat->fwd_match->x), cvRound(feat->fwd_match->y)); // its match in img1
            //qDebug()<<"pt2:("<<pt2.x<<","<<pt2.y<<")--->pt1:("<<pt1.x<<","<<pt1.y<<")";

            /* track the bounding box of matched points (disabled)
            if(pt1.x < img1LeftBound) img1LeftBound = pt1.x;
            if(pt1.x > img1RightBound) img1RightBound = pt1.x;
            if(pt2.x < img2LeftBound) img2LeftBound = pt2.x;
            if(pt2.x > img2RightBound) img2RightBound = pt2.x;//*/

            // Record the left/right relationship of this pair
            if(pt2.x > pt1.x)
                invertNum++;

            if(verticalStackFlag) // vertical layout
                pt2.y += img1->height; // shift pt2 down into the canvas
            else // horizontal layout
                pt2.x += img1->width;  // shift pt2 right into the canvas
            cvLine(stacked_ransac,pt1,pt2,CV_RGB(255,0,255),1,8,0); // draw the inlier match
        }

        // Rectangles around the matched regions / extra display (disabled)
        //cvRectangle(stacked_ransac,cvPoint(img1LeftBound,0),cvPoint(img1RightBound,img1->height),CV_RGB(0,255,0),2);
        //cvRectangle(stacked_ransac,cvPoint(img1->width+img2LeftBound,0),cvPoint(img1->width+img2RightBound,img2->height),CV_RGB(0,0,255),2);
        //cvNamedWindow(IMG_MATCH2);
        //cvShowImage(IMG_MATCH2,stacked_ransac);

        // Save the RANSAC match image (same renaming scheme as above)
        QString name_match_RANSAC = name1;
        cvSaveImage(name_match_RANSAC.replace( name_match_RANSAC.lastIndexOf(".",-1)-1 , 1 , "_match_RANSAC").toAscii().data(),stacked_ransac);

        /* H maps img2's points to img1's; normally img1 is the left image and
           then pt2.x < pt1.x for most inliers. If the user opened the images
           the other way round, pt2.x > pt1.x dominates instead. So the x-order
           statistics reveal whether img1 is the right-hand image; if so,
           transform with H's inverse H_IVT instead. */
        // More than 80% of inliers have pt2.x > pt1.x -> img1 is the right image
        if(invertNum > n_inliers * 0.8)
        {
            qDebug()<<tr("img1中是右图");
            CvMat * H_IVT = cvCreateMat(3, 3, CV_64FC1); // inverse of the homography
            // cvInvert returns non-zero when the inverse was computed
            if( cvInvert(H,H_IVT) )
            {
                // // dump H_IVT
                // for(int i=0;i<3;i++)
                //     qDebug()<<cvmGet(H_IVT,i,0)<<cvmGet(H_IVT,i,1)<<cvmGet(H_IVT,i,2);
                cvReleaseMat(&H);      // the old H is no longer needed
                H = cvCloneMat(H_IVT); // replace H with its inverse
                cvReleaseMat(&H_IVT);  // release the temporary inverse
                // Swap img1 and img2 so img1 is the left image again
                IplImage * temp = img2;
                img2 = img1;
                img1 = temp;
                //cvShowImage(IMG1,img1);
                //cvShowImage(IMG2,img2);
                ui->mosaicButton->setEnabled(true); // enable the mosaic button
            }
            else // H is singular: cvInvert returned 0
            {
                cvReleaseMat(&H_IVT); // release the unused inverse
                QMessageBox::warning(this,tr("警告"),tr("变换矩阵H不可逆"));
            }
        }
        else
            ui->mosaicButton->setEnabled(true); // enable the mosaic button
    }
    else // no homography could be computed: no common region between the images
    {
        QMessageBox::warning(this,tr("警告"),tr("两图中无公共区域"));
    }
    ui->radioButton_horizontal->setEnabled(false); // lock the layout choice
    ui->radioButton_vertical->setEnabled(false);
    ui->matchButton->setEnabled(false); // disable the match button (already matched)
}
int main( int argc, char** argv ) { IplImage* img1, * img2, * stacked; struct feature* feat1, * feat2, * feat; struct feature** nbrs; struct kd_node* kd_root; CvPoint pt1, pt2; double d0, d1; int n1, n2, k, i, m = 0; if( argc != 3 ) fatal_error( "usage: %s <img1> <img2>", argv[0] ); img1 = cvLoadImage( argv[1], 1 ); if( ! img1 ) fatal_error( "unable to load image from %s", argv[1] ); img2 = cvLoadImage( argv[2], 1 ); if( ! img2 ) fatal_error( "unable to load image from %s", argv[2] ); stacked = stack_imgs( img1, img2 ); fprintf( stderr, "Finding features in %s...\n", argv[1] ); n1 = sift_features( img1, &feat1 ); fprintf( stderr, "Finding features in %s...\n", argv[2] ); n2 = sift_features( img2, &feat2 ); kd_root = kdtree_build( feat2, n2 ); for( i = 0; i < n1; i++ ) { feat = feat1 + i; k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS ); if( k == 2 ) { d0 = descr_dist_sq( feat, nbrs[0] ); d1 = descr_dist_sq( feat, nbrs[1] ); if( d0 < d1 * NN_SQ_DIST_RATIO_THR ) { pt1 = cvPoint( cvRound( feat->x ), cvRound( feat->y ) ); pt2 = cvPoint( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) ); pt2.y += img1->height; cvLine( stacked, pt1, pt2, CV_RGB(255,0,255), 1, 8, 0 ); m++; feat1[i].fwd_match = nbrs[0]; } } free( nbrs ); } fprintf( stderr, "Found %d total matches\n", m ); display_big_img( stacked, "Matches" ); cvWaitKey( 0 ); /* UNCOMMENT BELOW TO SEE HOW RANSAC FUNCTION WORKS Note that this line above: feat1[i].fwd_match = nbrs[0]; is important for the RANSAC function to work. 
*/ // /* // { CvMat* H; IplImage* xformed; H = ransac_xform( feat1, n1, FEATURE_FWD_MATCH, lsq_homog, 4, 0.01, homog_xfer_err, 3.0, NULL, NULL ); if( H ) { xformed = cvCreateImage( cvGetSize( img2 ), IPL_DEPTH_8U, 3 ); cvWarpPerspective( img1, xformed, H, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) ); cvNamedWindow( "Xformed", 1 ); cvShowImage( "Xformed", xformed ); cvWaitKey( 0 ); cvReleaseImage( &xformed ); cvReleaseMat( &H ); } // } //*/ cvReleaseImage( &stacked ); cvReleaseImage( &img1 ); cvReleaseImage( &img2 ); kdtree_release( kd_root ); free( feat1 ); free( feat2 ); return 0; }
int main(int argc, char *argv[]) { IplImage* img1, * img2, * stacked1, *stacked2; char stemp[1024]; // printf("Reading images: %s and %s\n",argv[1],argv[2]); if(argc != 3) {printf("\n\nUsage: getsift [image1.jpg] [image2.jpg]\n\n"); exit(0);} img1=read_jpeg_file(argv[1]); img2=read_jpeg_file(argv[2]); stacked1 = stack_imgs( img1, img2 ); stacked2 = stack_imgs( img1, img2 ); struct feature* feat1, * feat2, * feat; struct feature** nbrs; struct feature** RANnb; struct kd_node* kd_root; CvPoint pt1, pt2; double d0, d1; int n1, n2, k, i,j, m = 0, n=0; printf("SIFT Features Extraction: %s\n", argv[1]); n1 = sift_features( img1, &feat1 ); printf("Numbers of Features from %s: %d\n",argv[1], n1); printf("SIFT Features Extraction: %s\n", argv[2]); n2 = sift_features( img2, &feat2 ); printf("Numbers of Features from %s: %d\n",argv[2], n2); sprintf(stemp,"%s.sift.jpg",argv[1]); draw_keypoint( img1, feat1, n1 ); write_jpeg_file(stemp,img1); sprintf(stemp,"%s.sift.jpg",argv[2]); draw_keypoint( img2, feat2, n2 ); write_jpeg_file(stemp,img2); FILE * feat1file; FILE * feat2file; feat1file=fopen("features1.txt","w+"); for(i=0;i<n1;i++) { fprintf(feat1file,"(%lf,%lf): {",(feat1+i)->x,(feat1+i)->y); for(j=0;j<FEATURE_MAX_D;j++) fprintf(feat1file,"% lf ",(feat1+i)->descr[j]); fprintf(feat1file,"}\n"); } printf("coordinate and descriptor of %s keypoints have been written in featfile1.txt\n",argv[1]); feat2file=fopen("features2.txt","w+"); for(i=0;i<n2;i++) { fprintf(feat2file,"(%lf,%lf): {",(feat2+i)->x,(feat2+i)->y); for(j=0;j<FEATURE_MAX_D;j++) fprintf(feat2file,"% lf ",(feat2+i)->descr[j]); fprintf(feat2file,"}\n"); } printf("coordinate and descriptor of %s keypoints have been written in featfile2.txt\n",argv[2]); kd_root = kdtree_build( feat2, n2 ); for( i = 0; i < n1; i++ ) { feat = feat1 + i; k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS ); if( k == 2 ) { d0 = descr_dist_sq( feat, nbrs[0] ); d1 = descr_dist_sq( feat, nbrs[1] ); if( d0 < d1 * 
NN_SQ_DIST_RATIO_THR ) { pt1 = cvPoint( cvRound( feat->x ), cvRound( feat->y ) ); pt2 = cvPoint( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) ); pt2.y += img1->height; cvLine( stacked1, pt1, pt2, CV_RGB(255,0,255), 1, 8, 0 ); m++; feat1[i].fwd_match = nbrs[0]; } } free( nbrs ); } printf("Found %d total matches\n", m ); write_jpeg_file("matches.jpg",stacked1); CvMat* H; int number=0; H = ransac_xform( feat1, n1, FEATURE_FWD_MATCH, lsq_homog, 4, 0.25, homog_xfer_err, 27.0, &RANnb, &number ); for( i = 0; i < number; i++ ) { pt1 = cvPoint( cvRound( RANnb[i]->x ), cvRound( RANnb[i]->y ) ); pt2 = cvPoint( cvRound( RANnb[i]->fwd_match->x ), cvRound( RANnb[i]->fwd_match->y ) ); pt2.y += img1->height; cvLine( stacked2, pt1, pt2, CV_RGB(255,0,255), 1, 8, 0 ); n++; } printf("Found %d total matches after RANSAC\n", n ); write_jpeg_file("matches.ransac.jpg",stacked2); cvReleaseImage( &img1 ); cvReleaseImage( &img2 ); kdtree_release( kd_root ); free( feat1 ); free( feat2 ); return 0; }
/*
 * Panorama stitching demo: matches SIFT features between two images,
 * estimates the homography H with RANSAC, computes the stitched canvas size,
 * aligns both images with a translation matrix T, warps img1 into the canvas
 * and pastes img2 over it. Intermediate results are displayed in HighGUI
 * windows along the way (press a key to advance past each one).
 */
int main( int argc, char** argv )
{
    IplImage* img1, * img2, * stacked;
    struct feature* feat1, * feat2, * feat;
    struct feature** nbrs;
    struct kd_node* kd_root;
    CvPoint pt1, pt2;
    double d0, d1;
    int n1, n2, k, i, m = 0;
    int match_cnt = 0; // NOTE(review): declared but never used below
    // Early mask experiment, disabled (would also deref img1/img2 before load):
    //CvMat *im1_mask = cvCreateMat( img1->height, img1->width, CV_64FC1 );
    //CvMat *im2_mask = cvCreateMat( img2->height, img2->width, CV_64FC1 );
    //cvSet( im1_mask, cvScalar( 1, 0, 0, 0 ), NULL );
    //cvSet( im2_mask, cvScalar( 1, 0, 0, 0 ), NULL );
    if( argc != 3 )
        fatal_error( "usage: %s <img1> <img2>", argv[0] );
    img1 = cvLoadImage( argv[1], 1 );
    if( ! img1 )
        fatal_error( "unable to load image from %s", argv[1] );
    img2 = cvLoadImage( argv[2], 1 );
    if( ! img2 )
        fatal_error( "unable to load image from %s", argv[2] );
    // Canvas with img1 stacked on top of img2 for drawing the match lines
    stacked = stack_imgs( img1, img2 );
    fprintf( stderr, "Finding features in %s...\n", argv[1] );
    n1 = sift_features( img1, &feat1 );
    fprintf( stderr, "Finding features in %s...\n", argv[2] );
    n2 = sift_features( img2, &feat2 );
    // Index img2's features, then match img1's against them (ratio test)
    kd_root = kdtree_build( feat2, n2 );
    for( i = 0; i < n1; i++ )
    {
        feat = feat1 + i;
        k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS );
        if( k == 2 )
        {
            d0 = descr_dist_sq( feat, nbrs[0] ); // squared dist to nearest
            d1 = descr_dist_sq( feat, nbrs[1] ); // squared dist to 2nd nearest
            if( d0 < d1 * NN_SQ_DIST_RATIO_THR )
            {
                if( m >= 2000 ) // hard cap on drawn/recorded matches
                    break;
                pt1 = cvPoint( cvRound( feat->x ), cvRound( feat->y ) );
                pt2 = cvPoint( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) );
                pt2.y += img1->height; // img2 is drawn below img1 on the canvas
                cvLine( stacked, pt1, pt2, CV_RGB(255,0,255), 1, 8, 0 );
                m++;
                feat1[i].fwd_match = nbrs[0]; // recorded for ransac_xform below
            }
        }
        free( nbrs );
    }
    fprintf( stderr, "Found %d total matches\n", m );
    display_big_img( stacked, "Matches" );
    cvWaitKey( 0 );
    /*********************************************************************************************************/
    // Estimate homography (img1 -> img2 frame) from the fwd_match pairs.
    // NOTE(review): H is used unchecked below; a failed RANSAC (H == NULL)
    // would crash in printMat/get_Stitched_Size.
    CvMat* H;
    IplImage* xformed;
    H = ransac_xform( feat1, n1, FEATURE_FWD_MATCH, lsq_homog, 4, 0.01,
                      homog_xfer_err, 3.0, NULL, NULL );
    /* output H */
    printf("xform Homography Matrix is :\n");
    printMat( H );
    /* get the size of new image */
    // XDATA/YDATA receive the x/y extents of the warped result
    // (presumably [min,max] — TODO confirm against get_Stitched_Size)
    double XDATA[2];
    double YDATA[2];
    CvSize new_size;
    CvSize im1_size = cvGetSize( img1 );
    CvSize im2_size = cvGetSize( img2 );
    new_size = get_Stitched_Size( im1_size, im2_size, H, XDATA, YDATA );
    /*declare the mask*/
    // Full-canvas masks plus per-image all-ones masks to be warped into them
    CvMat *im1_mask = cvCreateMat( new_size.height, new_size.width, CV_64FC1 );
    CvMat *im2_mask = cvCreateMat( new_size.height, new_size.width, CV_64FC1 );
    CvMat *im1_tempMask = cvCreateMat( im1_size.height, im1_size.width, CV_64FC1 );
    CvMat *im2_tempMask = cvCreateMat( im2_size.height, im2_size.width, CV_64FC1 );
    cvSet( im1_tempMask, cvScalar(1,0,0,0), NULL );
    cvSet( im2_tempMask, cvScalar(1,0,0,0), NULL );
    /* get translation Matrix for Aligning */
    // T shifts everything into positive coordinates when the warped image
    // extends left/above the origin (negative XDATA[0]/YDATA[0]).
    // NOTE(review): T is only partially initialized via cvmSet — the
    // remaining entries are whatever cvCreateMat left in the buffer.
    CvMat *T = cvCreateMat( 3, 3, CV_64FC1 );
    double tx = 0;
    double ty = 0;
    if( XDATA[0] < 0 )
    {
        tx = -XDATA[0] ;
        printf("tx = %f\n", tx );
    }
    if( YDATA[0] < 0 )
    {
        ty = -YDATA[0];
        printf("ty = %f\n", ty );
    }
    cvmSet( T, 0, 0, 1 );
    cvmSet( T, 0, 2, tx );
    cvmSet( T, 1, 1, 1 );
    cvmSet( T, 1, 2, ty );
    cvmSet( T, 2, 2, 1 );
    printf("T Matrix:\n");
    printMat( T );
    /* Transform and Align image2 */
    // Compose the translation into the homography: H <- T * H
    cvGEMM( T, H, 1, NULL, 0, H, 0 );
    printMat( H );
    xformed = cvCreateImage( new_size, IPL_DEPTH_8U, 3 );
    // Warp img1 into the stitched canvas with the aligned homography
    cvWarpPerspective( img1, xformed, H, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
    cvNamedWindow( "Xformed1", 1 );
    cvShowImage( "Xformed1", xformed );
    cvWaitKey( 0 );
    cvDestroyWindow("Xformed1");
    //cvSaveImage("im2.png", xformed);
    // Warp img1's all-ones mask the same way to mark its coverage
    cvWarpPerspective( im1_tempMask, im1_mask, H, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
    //cvSaveImage("im1_mask.png", im1_mask);
    cvNamedWindow( "im1_mask", 1 );
    cvShowImage( "im1_mask", im1_mask );
    cvWaitKey( 0 );
    cvDestroyWindow("im1_mask");
    /* Align image1 to bound */
    // img2 only needs the translation T (it stays axis-aligned)
    cvWarpPerspective( im2_tempMask, im2_mask, T, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
    //cvSaveImage( "im2_mask.png", im2_mask );
    // NOTE(review): window is created as "im12_mask" but shown/destroyed as
    // "im2_mask" — the mismatched name leaves an extra empty window behind.
    cvNamedWindow( "im12_mask", 1 );
    cvShowImage( "im2_mask", im2_mask );
    cvWaitKey( 0 );
    cvDestroyWindow("im2_mask");
    // Paste img2 (translated by tx,ty — truncated to int) over the warp
    cvSetImageROI( xformed, cvRect( tx, ty, img2->width, img2->height ) );
    cvCopy( img2, xformed, NULL );
    IplImage* huaijin = cvCreateImage( new_size, IPL_DEPTH_8U, 3 );
    cvWarpPerspective( img2, huaijin, T, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll( 0 ) );
    // NOTE(review): same created/shown window-name mismatch as above;
    // `huaijin` is also never released.
    cvNamedWindow( "im12_mask_i", 1 );
    cvShowImage( "im2_mask_i", huaijin);
    cvWaitKey( 0 );
    cvDestroyWindow("im2_mask_i");
    cvResetImageROI( xformed );
    //cvSaveImage( "re.png", xformed );
    /* composite */
    cvNamedWindow( "Xformed", 1 );
    cvShowImage( "Xformed", xformed );
    cvWaitKey( 0 );
    cvDestroyWindow("Xformed");
    /* */
    cvReleaseImage( &xformed );
    cvReleaseMat( &H );
    cvReleaseMat( &im1_tempMask );
    cvReleaseMat( &im2_tempMask );
    cvReleaseMat( &T );
    cvReleaseMat ( &im1_mask );
    cvReleaseMat ( &im2_mask );
    cvReleaseImage( &stacked );
    cvReleaseImage( &img1 );
    cvReleaseImage( &img2 );
    kdtree_release( kd_root );
    free( feat1 );
    free( feat2 );
    /****************************************************
    * using RANSAC algorithm get the Homegraphy Matrix
    * get T image1-->image2
    *
    * n_pts the number of points for estimating parameters
    *
    *
    * create unique indics of matchs : get_randi( int j )
    *
    *
    ******slove***********
    * 1.create the coff matrix
    * 2.Ah=0 -->decomposit A = UDV^T using gsl
    * 3
    * [ v19 v29 v39 ... v99 ]
    * h = -------------------------
    * v99
    *
    *
    ***************************************************/
    /* CvMat *H1 = cvCreateMat( 3, 3, CV_64FC1 );
    CvMat *inliers_mask = cvCreateMat( m, 1, CV_64FC1 );
    RANSAC_Homography( m, pts1, pts2, H1, inliers_mask );
    printf("my code Homography Matrix is :\n");
    for ( i = 0; i < H->rows; i++ ){
        for( k = 0; k < H->cols; k++ ){
            printf("%f ",cvmGet( H1, i, k ));
        }
        printf("\n");
    }
    cvReleaseMat( &H1 );
    cvReleaseMat( &inliers_mask );*/
    /***********************************************
    * composit image1 & image2
    * 1) transform image2 to image
    *
    * 2)***stitched image bounds****
    * W = max( [size(im1,2) size(im1,2)-XDATA(1) size(im2,2) size(im2,2)+XDATA(1)] );
    * H = max( [size(im1,1) size(im1,1)-YDATA(1) size(im2,1) size(im2,1)+YDATA(1)] );
    *
    * 3)*** Align image1 to bound ***
    *
    * 4)*** Align image2 to bound ***
    *
    * 5)*** Check size of bounds ***
    *
    * 6)*** combine both images ***
    * im1_mask
    * im2_mask
    * im1_part_mask
    * im2_part_mask
    * com_part_mask
    * stitched_image
    *
    * 7)****copy im2 transformed to ROI of stitching plan ( just a idear )
    *
    ************************************************/
    // NOTE(review): a function definition nested inside main() is not legal
    // ISO C++ (the body is placeholder comments only) — it should be moved
    // outside main(), before the final `return 0;`.
    void compositImages( IplImage *im1, IplImage *im2, CvMat *H )
    {
        /* 1.create a plan */
        /* 2.transform im2 & im2_mask */
        /* 3.emstime translation of im2 --T cp im1 -->plan with cvRect( tx, ty) */
        /* 4.cp tranformed im2 to plan with im2_mask where im2_mask has the same size with plan */
    }
    return 0;
}
int main( int argc, char** argv ) { int KDTREE_BBF_MAX_NN_CHKS = 200; float NN_SQ_DIST_RATIO_THR = 0.49; const CvScalar color = cvScalar( 255, 255, 0 ); ros::init(argc, argv, "SIFT"); ros::NodeHandle n; ros::Rate rate(33); ImageConverter ic; while ( !ic.ready ) { ros::spinOnce(); rate.sleep(); if ( !ros::ok() ) { printf("terminated by control_c\n"); return 0; } } string filepath = ros::package::getPath("sift") + "/"; ifstream fin( (filepath + "store.txt").data(), ios::in); //ifstream fin_main_pic( (filepath + "main_pic.txt").data(), ios::in); int pic_num = 5; string find; //cout << "how many pictures?" << endl; //cin >> pic_num; //cout << "which picture?" << endl; //cin >> find; time_t rawtime; struct tm * timeinfo; time ( &rawtime ); timeinfo = localtime ( &rawtime ); printf ( "The current date/time is: %s", asctime (timeinfo) ); char line[1024] = {0}; string* store = new string [pic_num+1]; string main_pic_name; int pic = 0; int find_pic = 0; while(fin.getline(line, sizeof(line))) { stringstream word(line); word >> store[pic]; store[pic] = filepath + store[pic]; /*if (store[pic] == find) { cout << store[pic] << endl; find_pic = pic; }*/ pic++; } //fin_main_pic.getline(line, sizeof(line)); //stringstream word(line); //word >> main_pic_name; //fin_main_pic.clear(); //fin_main_pic.close(); fin.clear(); fin.close(); IplImage* img; //IplImage* img1; struct feature* features;//, * features1; feature** features_all = new feature*[pic]; int* features_num = new int[pic]; for (int i = 0; i < pic; i++ ) features_num[i] = 0; IplImage** img_all = new IplImage*[pic]; for ( int i = 0; i < pic; i++ ) { //printf ( "Finding features in template picture %d\n", i ); img_all[i] = cvLoadImage( store[i].data(), 1 ); features_num[i] = sift_features( img_all[i], &features_all[i] ); printf ( "%d features in template picture %d\n", features_num[i], i ); time ( &rawtime ); timeinfo = localtime ( &rawtime ); printf ( "The current date/time is: %s", asctime (timeinfo) ); } /* printf ( 
"Finding features in main picture\n" ); img = cvLoadImage( main_pic_name.data(), 1 ); int n1 = sift_features( img, &features ); printf ( "%d features in main picture\n", n1 ); */ //cvShowImage( "main", img ); //for (int i = 0; i < n1; i++) //cvCircle( img, cvPoint(features[i].x, features[i].y), 5, color, 1, 8, 0 ); //cvShowImage( "Foundmain", img ); //cvShowImage( "template", img1 ); //for (int i = 0; i < n2; i++) //cvCircle( img1, cvPoint(features1[i].x, features1[i].y), 5, color, 1, 8, 0 ); //cvShowImage( "Foundtemplate", img1 ); bool features_catched = false; while ( ros::ok() ) { if ( ic.ready == true ) { ic.ready = false; *img = ic.curr_image; int n1 = sift_features( img, &features ); printf ( "%d features in main picture\n", n1 ); time ( &rawtime ); timeinfo = localtime ( &rawtime ); printf ( "The current date/time is: %s", asctime (timeinfo) ); features_catched = false; for ( int j = 0; j < pic ; j++ ) { IplImage* stacked; IplImage* ransac; struct feature* feat; struct feature** nbrs; struct kd_node* kd_root; CvPoint pt1, pt2; double d0, d1; int k, i, m = 0; CvMat point1_test; CvMat point2_test; double point1[3]; double point2[3] = { 0 }; stacked = stack_imgs( img, img_all[j] ); ransac = stack_imgs( img, img_all[j] ); kd_root = kdtree_build( features_all[j], features_num[j] ); for( i = 0; i < n1; i++ ) { feat = features + i; k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS ); if( k == 2 ) { d0 = descr_dist_sq( feat, nbrs[0] ); d1 = descr_dist_sq( feat, nbrs[1] ); if( d0 < d1 * NN_SQ_DIST_RATIO_THR ) { //pt1 = cvPoint( cvRound( feat->x ), cvRound( feat->y ) ); //pt2 = cvPoint( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) ); //pt2.y += img->height; //cvCircle( stacked, pt1, 3, cvScalar( (i*10)%255, (i*10)%255, 127 ), 1, 8, 0 ); //cvCircle( stacked, pt2, 3, cvScalar( (i*10)%255, (i*10)%255, 127 ), 1, 8, 0 ); //cvLine( stacked, pt1, pt2, cvScalar( (i*10)%255, (i*10)%255, 127 ), 1, 8, 0 ); m++; features[i].fwd_match = nbrs[0]; } } free( nbrs ); 
} double accounts = m * 100 / (double)features_num[j]; printf( "%d total matches, accounts for %f %%, in pic %d\n", m, accounts, j); //cvNamedWindow( "Matches", 1 ); //cvShowImage( "Matches", stacked ); time ( &rawtime ); timeinfo = localtime ( &rawtime ); printf ( "The current date/time is: %s", asctime (timeinfo) ); CvMat* H = ransac_xform( features, n1, FEATURE_FWD_MATCH, lsq_homog, 4, 0.01, error, 2, NULL, NULL ); if( H ) { for( i = 0; i < n1; i++ ) { feat = features + i; k = kdtree_bbf_knn( kd_root, feat, 2, &nbrs, KDTREE_BBF_MAX_NN_CHKS ); if( k == 2 ) { d0 = descr_dist_sq( feat, nbrs[0] ); d1 = descr_dist_sq( feat, nbrs[1] ); if( d0 < d1 * NN_SQ_DIST_RATIO_THR ) { pt1 = cvPoint( cvRound( feat->x ), cvRound( feat->y ) ); pt2 = cvPoint( cvRound( nbrs[0]->x ), cvRound( nbrs[0]->y ) ); pt2.y += img->height; point1[0] = pt1.x; point1[1] = pt1.y; point1[2] = 1.0; cvInitMatHeader( &point1_test, 3, 1, CV_64FC1, point1, CV_AUTOSTEP ); cvInitMatHeader( &point2_test, 3, 1, CV_64FC1, point2, CV_AUTOSTEP ); cvMatMul( H, &point1_test, &point2_test ); /*if ( abs( point2[0]/point2[2]-pt2.x) < 2 && abs( point2[1]/point2[2]+img->height-pt2.y) < 2 ) { cvCircle( ransac, pt1, 3, cvScalar( (i*10)%255, (i*10)%255, 127 ), 1, 8, 0 ); cvCircle( ransac, pt2, 3, cvScalar( (i*10)%255, (i*10)%255, 127 ), 1, 8, 0 ); cvLine( ransac, pt1, pt2, cvScalar( (i*10)%255, (i*10)%255, 127 ), 1, 8, 0 ); }*/ // features[i].fwd_match = nbrs[0]; } } free( nbrs ); //printf("features catched, going to exit\n"); } //cvNamedWindow( "Xformed" ); //cvShowImage( "Xformed", ransac ); features_catched = true; time ( &rawtime ); timeinfo = localtime ( &rawtime ); printf ( "ransac.. 
The current date/time is: %s", asctime (timeinfo) ); } //cvWaitKey( 0 ); cvReleaseImage( &ransac ); cvReleaseMat( &H ); //cvDestroyWindow( "main" ); //cvDestroyWindow( "Foundmain" ); //cvDestroyWindow( "template" ); //cvDestroyWindow( "Foundtemplate" ); //cvReleaseImage( &img_all[j] ); cvReleaseImage( &stacked ); kdtree_release( kd_root ); } if (!features_catched) { printf("Sorry, there is no item in the picture\n"); } else { printf("Item catched in the picture!\n"); } } ros::spinOnce(); rate.sleep(); } //cvReleaseImage( &img ); free( features ); for ( int i = 0; i < pic; i++ ) { free( features_all[i] ); } free(features_all); return 0; }