Example #1
/* Write features out to a file */
void SIFTExtractor::save_feature(string file_name)
{
    if(!extraction)
    {
        cout << "image features could not extract yet." << endl;
        return;
    }

    // Open the output file
    fstream fout(file_name.c_str(), ios::out);
    if (!fout.is_open())
    {
        cerr << "cannot open feature file: " << file_name << "¥n";
        return;
    }

    // Set the output precision (6 digits after the decimal point)
    fout.setf(ios::fixed);
    fout.precision(6);

    // Write the number of keypoints and the descriptor dimensionality on the first line
    fout << keypoints.size() << "\t" << extractor.descriptorSize();
    fout << endl;

    // Write out the keypoints.
    std::vector<cv::KeyPoint>::iterator itk;
    int keypoint_index = 0;
    for (itk = keypoints.begin(); itk != keypoints.end(); ++itk, keypoint_index++)
    {
        // (1) Output the keypoint information
        // x and y coordinates are rescaled according to the scaling parameters
        fout << (int)((itk->pt.x) / scale_x) << "\t";  // x coordinate
        fout << (int)((itk->pt.y) / scale_y) << "\t";  // y coordinate
        fout << itk->size << "\t";  // scale
        fout << itk->angle << "\t"; // orientation

        // (2) Output the feature vector
        for (int dim_index = 0; dim_index < extractor.descriptorSize(); dim_index++)
            fout << (int) (descriptors.at<float>(keypoint_index, dim_index)) << "\t";

        fout << endl;
    }
    fout.close();
}
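
For reference, a minimal sketch of a matching reader for the file format written above (first line: keypoint count and descriptor dimensionality; then one tab-separated keypoint per line). load_feature is a hypothetical helper, not part of the original class:

// Hypothetical reader for the format written by save_feature above.
#include <fstream>
#include <string>
#include <vector>
#include <opencv2/core/core.hpp>

bool load_feature(const std::string &file_name,
                  std::vector<cv::KeyPoint> &keypoints, cv::Mat &descriptors)
{
    std::ifstream fin(file_name.c_str());
    if (!fin.is_open())
        return false;

    // First line: keypoint count and descriptor dimensionality
    int num_keypoints = 0, dims = 0;
    fin >> num_keypoints >> dims;

    keypoints.assign(num_keypoints, cv::KeyPoint());
    descriptors.create(num_keypoints, dims, CV_32F);

    // One keypoint per line: x, y, size, angle, then the descriptor values
    for (int i = 0; i < num_keypoints; i++)
    {
        fin >> keypoints[i].pt.x >> keypoints[i].pt.y
            >> keypoints[i].size >> keypoints[i].angle;
        for (int d = 0; d < dims; d++)
            fin >> descriptors.at<float>(i, d);
    }
    return !fin.fail();
}
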
void compute_bow_histogram(cv::Mat &sample, Histogram &feature_vector, cv::Ptr<cv::FeatureDetector> &detector, cv::SiftDescriptorExtractor &extractor, Quantization *quant){
    //detect keypoints
    std::vector<cv::KeyPoint> keypoints;
    detector->detect( sample, keypoints );

    //compute descriptor
    cv::Mat descriptor_uchar;
    extractor.compute(sample, keypoints, descriptor_uchar);

    cv::Mat descriptor_double;
    descriptor_uchar.convertTo(descriptor_double, CV_64F);

    //convert from mat to bag of unquantized features
    BagOfFeatures unquantized_features;
    convert_mat_to_vector(descriptor_double, unquantized_features);

    //quantize regions -- true BagOfFeatures
    quant->quantize(unquantized_features, feature_vector);
}
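
compute_bow_histogram relies on a convert_mat_to_vector helper that is not shown in these examples. A plausible sketch, assuming BagOfFeatures is a std::vector<std::vector<double>> holding one descriptor row per keypoint (the actual typedef may differ):

#include <vector>
#include <opencv2/core/core.hpp>

// Assumed typedef; the real definition is not shown in these examples.
typedef std::vector<std::vector<double> > BagOfFeatures;

// Copy each CV_64F descriptor row of the matrix into its own vector.
void convert_mat_to_vector(const cv::Mat &descriptors, BagOfFeatures &features)
{
    features.reserve(descriptors.rows);
    for (int row = 0; row < descriptors.rows; row++)
    {
        const double *p = descriptors.ptr<double>(row);
        features.push_back(std::vector<double>(p, p + descriptors.cols));
    }
}
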
Histogram compute_histogram(std::string imFile, cv::Ptr<cv::FeatureDetector> &detector, cv::Ptr<cv::FeatureDetector> &detector2, cv::SiftDescriptorExtractor &extractor, Quantization *quant) {
    cv::Mat img = cv::imread(imFile);

    //detect SIFT keypoints
    std::vector<cv::KeyPoint> keypoints;
    detector->detect( img, keypoints );

    //detect MSER keypoints
    std::vector<cv::KeyPoint> keypoints2;
    detector2->detect( img, keypoints2 );

    //group them together
    for(cv::KeyPoint& keypoint : keypoints2) {
        keypoints.push_back(keypoint);
    }

    std::cout << " - keypoint_ct: " << keypoints.size() << std::endl;

    //compute descriptors
    cv::Mat descriptor_uchar;
    extractor.compute(img, keypoints, descriptor_uchar);

    cv::Mat descriptor_double;
    descriptor_uchar.convertTo(descriptor_double, CV_64F);

    //convert from mat to bag of unquantized features
    BagOfFeatures unquantized_features;
    convert_mat_to_vector(descriptor_double, unquantized_features);

    //quantize to form bag of words
    Histogram bag_of_words;
    quant->quantize(unquantized_features, bag_of_words);

    //normalize -- left unimplemented here (see the sketch after this function)

    return bag_of_words;
}
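
The normalization step is left empty above. A common choice is L1 normalization, so histograms from images with different keypoint counts become comparable; a sketch assuming Histogram is a std::vector<double>:

#include <numeric>
#include <vector>

// L1-normalize a bag-of-words histogram in place: divide every bin by the
// total count. Assumes Histogram is a std::vector<double>.
void normalize_histogram(std::vector<double> &histogram)
{
    double total = std::accumulate(histogram.begin(), histogram.end(), 0.0);
    if (total > 0.0)
    {
        for (size_t i = 0; i < histogram.size(); i++)
            histogram[i] /= total;
    }
}
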
//computes the centroid for a category from its training images
void LocalDescriptorAndBagOfFeature::train_category(const std::vector<cv::Mat> &samples, Histogram &centroid, const cv::Ptr<cv::FeatureDetector> &detector, const cv::SiftDescriptorExtractor &extractor, Quantization *quant){
    clock_t start = clock();
    int i = 0;
    for(const cv::Mat& sample : samples){
        i++;
        std::cout << "converting img " << i << " of " << samples.size() << " to bag of features" << std::endl;

        //detect keypoints
        std::vector<cv::KeyPoint> keypoints;
        detector->detect( sample, keypoints );

        //compute descriptor
        cv::Mat descriptor_uchar;
        extractor.compute(sample, keypoints, descriptor_uchar);

        cv::Mat descriptor_double;
        descriptor_uchar.convertTo(descriptor_double, CV_64F);

        //convert from mat to bag of unquantized features
        BagOfFeatures unquantized_features;
        convert_mat_to_vector(descriptor_double, unquantized_features);

        //quantize regions -- true BagOfFeatures
        Histogram feature_vector;
        quant->quantize(unquantized_features, feature_vector);

        //aggregate
        vector_add(centroid, feature_vector);
    }

    //divide by training category size to compute centroid
    //std::transform(centroid.begin(), centroid.end(), centroid.begin(), std::bind1st(std::divides<double>(),bikes.size()));
    for(double& d : centroid){
        d = d/samples.size();
    }
    std::cout << double( clock() - start ) / (double)CLOCKS_PER_SEC << " seconds." << std::endl;
}
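
A hedged usage sketch: because train_category only accumulates into the centroid via vector_add, the centroid must be zero-initialized to the vocabulary size beforehand. bike_images, detector, extractor, and quant are assumed to be set up elsewhere, and vocabulary_size() is an assumed accessor on Quantization:

// Hypothetical call site; vocabulary_size() is an assumed accessor, and
// Histogram is assumed to be a std::vector<double>.
Histogram centroid(quant->vocabulary_size(), 0.0);
LocalDescriptorAndBagOfFeature::train_category(bike_images, centroid,
                                               detector, extractor, quant);
// centroid now holds the mean bag-of-words histogram for the category.
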
Example #5
/* Feature extraction by dense sampling */
void SIFTExtractor::extract_using_dense()
{
    /*+ Parameter computation ++++++++++++++++*/
    Size image_size = Size(proc_image.cols, proc_image.rows); // get the image size

    // 1. Compute the sampling interval
    double interval = sqrt((image_size.width * image_size.height) / (double)feature_num);

    // 2. Determine the scale (sampling interval / 2.0)
    double scale = interval / 2.0;

    // 3. Compute how many samples fit horizontally and vertically at that interval
    int sample_col_num = (double)image_size.width  / floor(interval);
    int sample_row_num = (double)image_size.height / floor(interval);

    // 4. Compute the image margins
    int odd_cols, odd_rows;

    if( (image_size.width % sample_col_num == 0) && (image_size.height % sample_row_num == 0) )
    {
        //cout << "1"  << endl;
        odd_cols = image_size.width -  ( (sample_col_num - 1)  * floor(interval) );
        odd_rows = image_size.height - ( (sample_row_num - 1) * floor(interval) );
    }
    else if( (image_size.width % sample_col_num == 0) && (image_size.height % sample_row_num != 0) )
    {
        //cout << "2"  << endl;
        odd_cols = image_size.width -  ( (sample_col_num - 1) * floor(interval) );
        odd_rows = image_size.height - ( sample_row_num * floor(interval) );
    }
    else if( (image_size.width % sample_col_num != 0) && (image_size.height % sample_row_num == 0) )
    {
        //cout << "3"  << endl;
        odd_cols = image_size.width -  ( sample_col_num * floor(interval) );
        odd_rows = image_size.height - ( (sample_row_num - 1) * floor(interval) );
    }
    else
    {
        //cout << "4"  << endl;
        odd_cols = image_size.width -  ( sample_col_num * floor(interval) );
        odd_rows = image_size.height - ( sample_row_num * floor(interval) );
    }

    // 5. Compute how many pixels to shift from the top-left corner before extracting features
    double sift = sqrt((odd_cols * odd_rows) / 4.0);

    // 6. Special-case handling for square images
    if( image_size.width == image_size.height)
    {
        int tmp = (double)image_size.width - (floor(sift) * 2.0) - (floor(interval) * (sqrt(feature_num) - 1.0));
        if(tmp == 0)
            sift--;
        else
            sift += tmp/2;
    }
    /*
    cout << "Size (" << proc_image.cols << " * " << proc_image.rows << ")" << endl;
    cout << "interval:" << interval << endl;
    cout << "scale:" << scale << endl;
    cout << "sample_col_num:" << sample_col_num << endl;
    cout << "sample_row_num:" << sample_row_num << endl;
    cout << "odd_cols:" << odd_cols << endl;
    cout << "odd_rows:" << odd_rows << endl;
    cout << "sift:" << sift << endl;
    */

    /*+ Feature extraction ++++++++++++++++*/
    // 1. Extract keypoints
    while(true)
    {
        cv::DenseFeatureDetector detector(floor(scale), 1, 0.1f, floor(interval), floor(sift), true, false);
        detector.detect(proc_image, keypoints);

        // Feedback loop until the requested number of features is obtained
        if(keypoints.size() < feature_num)
        {
            sift--;
            if(sift < 0)
                cerr << "計算式がおかしい" << endl;
        }
        else if(keypoints.size() > feature_num)
        {
            sift++;
            if(sift < 0)
                cerr << "計算式がおかしい" << endl;
        }
        else
            break;

    }

    // 2. Extract SIFT descriptors
    extractor.compute(proc_image, keypoints, descriptors);

    //TODO: error handling

    // Set the "extracted" flag
    extraction = true;
}
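
For reference, a manual grid sampler that approximates what the single-scale-level DenseFeatureDetector call above produces (OpenCV 3+ removed DenseFeatureDetector, so dense grids are usually built by hand); step, bound, and scale correspond to floor(interval), floor(sift), and floor(scale) in the code above:

#include <vector>
#include <opencv2/core/core.hpp>

// Sketch of a single-scale dense grid equivalent to
// DenseFeatureDetector(initFeatureScale, 1, ..., initXyStep, initImgBound, ...):
// one keypoint every `step` pixels, starting `bound` pixels in from every
// border, all with the same scale.
void dense_grid(const cv::Mat &image, int step, int bound, float scale,
                std::vector<cv::KeyPoint> &keypoints)
{
    for (int y = bound; y < image.rows - bound; y += step)
        for (int x = bound; x < image.cols - bound; x += step)
            keypoints.push_back(cv::KeyPoint((float)x, (float)y, scale));
}
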