Code example #1
0
File: siftmatch.cpp  Project: githubcjl/uVision_cjl
//特征点检测
void SiftMatch::on_detectButton_clicked()
{
    img1_Feat = cvCloneImage(img1);//复制图1,深拷贝,用来画特征点
    img2_Feat = cvCloneImage(img2);//复制图2,深拷贝,用来画特征点

    //默认提取的是LOWE格式的SIFT特征点
    //提取并显示第1幅图片上的特征点
    n1 = sift_features( img1, &feat1 );//检测图1中的SIFT特征点,n1是图1的特征点个数
    export_features("feature1.txt",feat1,n1);//将特征向量数据写入到文件
    draw_features( img1_Feat, feat1, n1 );//画出特征点

    //cvNamedWindow(IMG1_FEAT);//创建窗口
    //cvShowImage(IMG1_FEAT,img1_Feat);//显示

    QString name1_Feat = name1;//文件名,原文件名加"_Feat"
    cvSaveImage(name1_Feat.insert( name1_Feat.lastIndexOf(".",-1) , "_Feat").toAscii().data(),img1_Feat);//保存图片

    //提取并显示第2幅图片上的特征点
    n2 = sift_features( img2, &feat2 );//检测图2中的SIFT特征点,n2是图2的特征点个数
    export_features("feature2.txt",feat2,n2);//将特征向量数据写入到文件
    draw_features( img2_Feat, feat2, n2 );//画出特征点

    //cvNamedWindow(IMG2_FEAT);//创建窗口
    //cvShowImage(IMG2_FEAT,img2_Feat);//显示

    QString name2_Feat = name2;//文件名,原文件名加"_Feat"
    cvSaveImage(name2_Feat.insert( name2_Feat.lastIndexOf(".",-1) , "_Feat").toAscii().data(),img2_Feat);//保存图片

    ui->detectButton->setEnabled(false);//禁用特征检测按钮
    ui->radioButton_horizontal->setEnabled(true);//激活排列方向选择按钮
    ui->radioButton_vertical->setEnabled(true);
    ui->matchButton->setEnabled(true);//激活特征匹配按钮
}
Code example #2
0
File: siftfeat.c  Project: Noobidoo/insight3d
/*
 * Detects SIFT features in the image named by the global img_file_name,
 * optionally displays them (global `display`), optionally exports them to
 * out_file_name and saves the annotated image to out_img_name.
 * The SIFT parameters (intvls, sigma, contr_thr, curv_thr, img_dbl,
 * descr_width, descr_hist_bins) are globals defined elsewhere in the file.
 * Returns 0 on success; exits with status 1 if the image cannot be loaded.
 */
int main( int argc, char** argv )
{
	IplImage* img;
	struct feature* features = NULL; /* NULL so cleanup is safe even if detection fails */
	int n = 0;

	fprintf( stderr, "Finding SIFT features...\n" );
	img = cvLoadImage( img_file_name, 1 );
	if( ! img )
	{
		/* '\n' added so the diagnostic ends its own line on stderr */
		fprintf( stderr, "unable to load image from %s\n", img_file_name );
		exit( 1 );
	}
	n = _sift_features( img, &features, intvls, sigma, contr_thr, curv_thr,
						img_dbl, descr_width, descr_hist_bins );
	fprintf( stderr, "Found %d features.\n", n );

	if( display )
	{
		draw_features( img, features, n );
		cvNamedWindow( img_file_name, 1 );
		cvShowImage( img_file_name, img );
		cvWaitKey( 0 );
	}

	if( out_file_name != NULL )
		export_features( out_file_name, features, n );

	if( out_img_name != NULL )
		cvSaveImage( out_img_name, img );

	/* release resources: the feature array is heap-allocated by
	   _sift_features (freed with free() per the OpenSIFT API) and the
	   image by cvLoadImage — both were leaked in the original */
	free( features );
	cvReleaseImage( &img );
	return 0;
}
Code example #3
0
File: siftfeat.c  Project: xiawei0000/Logo_de
/*
 * Parses the command line, detects SIFT features in the image named by the
 * global img_file_name, optionally displays them (`display`), optionally
 * exports them to out_file_name and saves the annotated image to
 * out_img_name.  SIFT parameters are file-level globals set by arg_parse().
 * Returns 0 on success; fatal_error() aborts on load failure.
 */
int main2( int argc, char** argv )
{
  IplImage* img;
  struct feature* features = NULL; /* NULL so cleanup is safe even if detection fails */
  int n = 0;

  arg_parse( argc, argv );

  fprintf( stderr, "Finding SIFT features...\n" );
  img = cvLoadImage( img_file_name, 1 );
  if( ! img )
    fatal_error( "unable to load image from %s", img_file_name );
  n = _sift_features( img, &features, intvls, sigma, contr_thr, curv_thr,
		      img_dbl, descr_width, descr_hist_bins );
  fprintf( stderr, "Found %d features.\n", n );

  if( display )
    {
      draw_features( img, features, n );
      display_big_img( img, img_file_name );
      cvWaitKey( 0 );
    }

  if( out_file_name != NULL )
    export_features( out_file_name, features, n );

  if( out_img_name != NULL )
    cvSaveImage( out_img_name, img, NULL );

  /* the feature array was leaked in the original; free() matches the
     OpenSIFT allocation */
  free( features );
  cvReleaseImage( &img );
  return 0;
}
Code example #4
0
File: dspfeat.c  Project: nyb/sift-win-recompiled
/*
 * Displays previously exported SIFT features on top of an image and saves
 * the annotated image as "<feat_file>.jpg".
 * Usage: dspFeat <features.sift> <image> — with exactly two arguments the
 * built-in defaults (globals feat_file / img_file) are overridden.
 * Returns 0 on success; fatal_error() aborts on load/import failure.
 */
int main( int argc, char** argv )
{	
	IplImage* img;
	struct feature* feat = NULL; /* NULL so cleanup is safe if import fails early */
	char* name;
	int n;
	char feat_image_name[200];

	if (argc==3){/* command: dspFeat <.sift file> <image> */
		feat_file=argv[1];
		img_file=argv[2];
	}
	
	img = cvLoadImage( img_file, 1 );
	if( ! img )
		fatal_error( "unable to load image from %s", img_file );
	n = import_features( feat_file, feat_type, &feat );
	if( n == -1 )
		fatal_error( "unable to import features from %s", feat_file );
	name = feat_file; /* window title is the feature file name */

	draw_features( img, feat, n );
	cvNamedWindow( name, 1 );
	cvShowImage( name, img );
	/* snprintf is the portable C99 equivalent of the MSVC-only sprintf_s */
	snprintf(feat_image_name,sizeof(feat_image_name),"%s.jpg",feat_file);
	cvSaveImage(feat_image_name,img,0);
	cvWaitKey( 0 );

	/* release the imported feature array and the loaded image (both were
	   leaked in the original) */
	free( feat );
	cvReleaseImage( &img );
	return 0;
}
Code example #5
0
// Main processing loop of the application: repeatedly grabs a frame from the
// GStreamer video input, converts it to grayscale, runs the FAST feature
// detector on it, draws the detected features over the color frame, and shows
// the result in a CImg window until the user closes that window.
// @param options parsed program options forwarded to the video input and
//                the feature detector.
// @return 0 on normal exit (window closed by the user).
int FeaturesDetectionApplication::main_loop(program_options::variables_map &options)
{
    printf("FeaturesDetectionApplication::main_loop says hello world !\n");


    //init_gui(options);
    //run_gui();

	// initialization ---
    gst_video_input_p.reset(new GstVideoInput(options));
    features_detector_p.reset(new SimpleFAST(options));

    // video output ---
    // Allocate the RGB frame buffer with the source's dimensions and grab a
    // first frame so the display window opens with real content.
    rgb8_cimg_t current_image(gst_video_input_p->get_image_dimensions());
    gst_video_input_p->get_new_image(current_image.view); // copy the data


    CImgDisplay video_display(current_image.dimx(), current_image.dimy(), get_application_title().c_str());
    video_display.show();
    video_display.display(current_image);

    // intermediary image --
    // Grayscale buffer reused on every iteration (same dimensions as the frame).
    gray8_image_t gray_image(current_image.view.dimensions());

    // main loop ---

    do
    {
        // get new image --
        gst_video_input_p->get_new_image(current_image.view); // copy the data

        // color to gray_image
        copy_and_convert_pixels(current_image.view, boost::gil::view(gray_image));
        
        // compute features
        // NOTE(review): the detector returns a reference; its backing storage
        // presumably lives inside the detector and is only valid until the
        // next detect_features call — confirm before holding onto it.
        const vector<FASTFeature> &features =
            features_detector_p->detect_features((const_view(gray_image)));

        // plot features on output image
        draw_features(features, current_image);

        video_display.display(current_image);

        // add a delay ---	
        wait_some_seconds(0.1); // [seconds]


    }
    while (video_display.is_closed == false); // loop until the user closes the window

    return 0;

}
// [ref] ${OPENSIFT_HOME}/src/siftfeat.c
void extract_feature()
{
#if 1
    const std::string in_img_file_name("./data/feature_analysis/sift/beaver.png");
    const std::string out_sift_file_name("./data/feature_analysis/sift/beaver.sift");
    const std::string out_img_file_name;
#elif 0
    const std::string in_img_file_name("./data/feature_analysis/sift/marker_pen_2.bmp");
    const std::string out_sift_file_name("./data/feature_analysis/sift/marker_pen_2.sift");
    const std::string out_img_file_name;
#endif

    const int display = 1;
    const int intvls = SIFT_INTVLS;
    const double sigma = SIFT_SIGMA;
    const double contr_thr = SIFT_CONTR_THR;
    const int curv_thr = SIFT_CURV_THR;
    const int img_dbl = SIFT_IMG_DBL;
    const int descr_width = SIFT_DESCR_WIDTH;
    const int descr_hist_bins = SIFT_DESCR_HIST_BINS;

    std::cout << "finding SIFT features..." << std::endl;
    IplImage *img = cvLoadImage(in_img_file_name.c_str(), 1);
    if (!img)
    {
        std::cout <<"unable to load image from " << in_img_file_name << std::endl;
        return;
    }

    struct feature *features;
    const int n = _sift_features(img, &features, intvls, sigma, contr_thr, curv_thr, img_dbl, descr_width, descr_hist_bins);
    std::cout << "found " << n << " features." << std::endl;

    if (display)
    {
        draw_features(img, features, n);
        cvNamedWindow(in_img_file_name.c_str(), 1);
        cvShowImage(in_img_file_name.c_str(), img);
        cvWaitKey(0);
    }

    if (!out_sift_file_name.empty())
        export_features((char *)out_sift_file_name.c_str(), features, n);

    if (!out_img_file_name.empty())
        cvSaveImage(out_img_file_name.c_str(), img);
}
Code example #7
0
File: dspfeat.c  Project: gamman/MRPT
/*
 * Displays previously exported SIFT features on top of an image.
 * The image path (img_file), feature file (feat_file) and feature format
 * (feat_type) are globals defined elsewhere in the file.
 * Returns 0 on success; fatal_error() aborts on load/import failure.
 */
int main( int argc, char** argv )
{
	IplImage* img;
	struct feature* feat = NULL; /* NULL so cleanup is safe if import fails early */
	char* name;
	int n;

	img = cvLoadImage( img_file, 1 );
	if( ! img )
		fatal_error( "unable to load image from %s", img_file );
	n = import_features( feat_file, feat_type, &feat );
	if( n == -1 )
		fatal_error( "unable to import features from %s", feat_file );
	name = feat_file; /* window title is the feature file name */

	draw_features( img, feat, n );
	cvNamedWindow( name, 1 );
	cvShowImage( name, img );
	cvWaitKey( 0 );

	/* release the imported feature array and the loaded image (both were
	   leaked in the original) */
	free( feat );
	cvReleaseImage( &img );
	return 0;
}
Code example #8
0
File: draw.cpp  Project: Patrick6289/navguide
/* Renders one frame of the feature-matching viewer in OpenGL.
 * <ww, wh> is the size of the OpenGL window.
 * Two image strips (img1/img2, each `nimg` sensor images) are drawn stacked
 * vertically; features, matches, the current candidate match, selection
 * highlights, feature thumbnails, and a status line are layered on top.
 * When `select` is true only pickable geometry (features/matches) is drawn,
 * for GL selection-mode picking; images, thumbnails and text are skipped.
 * `user_scale`/`user_trans` are the user's zoom and pan; highlight_*/match_*
 * and select_m identify items to emphasize; precision/recall are in [0,1].
 */
void draw_frame (int ww, int wh, botlcm_image_t **img1, botlcm_image_t **img2, int nimg, navlcm_feature_list_t *features1, navlcm_feature_list_t *features2, navlcm_feature_match_set_t *matches, double user_scale, double *user_trans, gboolean feature_finder_enabled, gboolean select, navlcm_feature_t *highlight_f, navlcm_feature_match_t *highlight_m, navlcm_feature_t *match_f1, navlcm_feature_t *match_f2, navlcm_feature_match_t *select_m, navlcm_feature_match_t *match, int feature_type, double precision, double recall)
{
    if (!img1[0]) return; // nothing to draw without a first image

    // determine scale factor
    // Fit four images side by side (.25*ww) and one strip's height into the
    // window, with a 5% margin, then apply the user's zoom on top.
    float window_scale = .95*fmin (.25*ww/img1[0]->width, 1.0*wh/img1[0]->height);
    float ratio = user_scale* window_scale;
    double trans1[2] = {user_trans[0], user_trans[1]};
    // The second strip is offset by one (scaled) image height below the first.
    double trans2[2] = {user_trans[0], user_trans[1] + ratio*img1[0]->height};

    if (!select) {
        // draw images (skipped in selection mode: images are not pickable)
        draw_images (img1, nimg, ratio, trans1);
        draw_images (img2, nimg, ratio, trans2);
    }

    // this is the current match to be (or not to be) added to ground-truth
    if (match) {
        draw_match (match, GREEN, 2, img1[0]->width, img1[0]->height, ratio, trans1, trans2);
        draw_match_ends (match, RED, 4, img1[0]->width, img1[0]->height, ratio, trans1, trans2);
    }

    // draw features (GL error state is asserted clean around each call)
    assert (check_gl_error ("1"));
    draw_features (features1, RED, 4, ratio, trans1, select, NAME_POINT1, highlight_f, feature_finder_enabled);
    assert (check_gl_error ("1"));
    draw_features (features2, RED, 4, ratio, trans2, select, NAME_POINT2, highlight_f, feature_finder_enabled);
    assert (check_gl_error ("1"));

    // draw matches
    draw_matches (matches, YELLOW, 2, img1[0]->width, img1[0]->height, ratio, trans1, trans2, select, highlight_m, select_m);

    // draw selected items
    if (features1 && features2) {
        draw_select_feature (match_f1, features1->width, GREEN, 12, ratio, trans1);
        draw_select_feature (match_f2, features2->width, GREEN, 12, ratio, trans2);
    }

    // draw feature thumbnails
    // Priority: a selected match, else a highlighted match, else the
    // highlighted/selected individual features. Top thumbnail at y=0 shows
    // the source side, bottom one at y=wh/2 shows the destination side.
    if (!select && select_m) {
        draw_img_subregion (img1, nimg, select_m->src.col, select_m->src.row, select_m->src.sensorid, select_m->src.scale, select_m->src.ori, feature_type, 0, 0);
        draw_img_subregion (img2, nimg, select_m->dst[0].col, select_m->dst[0].row, select_m->dst[0].sensorid, select_m->dst[0].scale, select_m->dst[0].ori, feature_type, 0, wh/2);
    } else if (!select && highlight_m) {
        draw_img_subregion (img1, nimg, highlight_m->src.col, highlight_m->src.row, highlight_m->src.sensorid, highlight_m->src.scale, highlight_m->src.ori, feature_type, 0, 0);
        draw_img_subregion (img2, nimg, highlight_m->dst[0].col, highlight_m->dst[0].row, highlight_m->dst[0].sensorid, highlight_m->dst[0].scale, highlight_m->dst[0].ori, feature_type, 0, wh/2);
    } else {
        if (!select && highlight_f) {
            // NOTE(review): the same feature coordinates are looked up in both
            // image sets here — presumably intentional (same sensor layout);
            // confirm against the caller.
            draw_img_subregion (img1, nimg, highlight_f->col, highlight_f->row, highlight_f->sensorid, highlight_f->scale, highlight_f->ori, feature_type, 0, 0);
            draw_img_subregion (img2, nimg, highlight_f->col, highlight_f->row, highlight_f->sensorid, highlight_f->scale, highlight_f->ori, feature_type, 0, wh/2);
        }
        if (!select && match_f1)
            draw_img_subregion (img1, nimg, match_f1->col, match_f1->row, match_f1->sensorid, match_f1->scale, match_f1->ori, feature_type, 0, 0);
        if (!select && match_f2)
            draw_img_subregion (img2, nimg, match_f2->col, match_f2->row, match_f2->sensorid, match_f2->scale, match_f2->ori, feature_type, 0, wh/2);
    }

    assert (check_gl_error ("1"));

    if (!select) {
        // display text info
        if (matches)
            display_msg (10, wh-20, ww, wh, RED, 0, "# matches: %d", matches->num);
        if (features1 && features2)
            display_msg (200, wh-20, ww, wh, RED, 0, "# features: %d/%d", features1->num, features2->num);
        display_msg (500, wh-20, ww, wh, RED, 0, "precision: %.2f%% recall: %.2f %%", 100.0*precision, 100.0*recall);
    }
}