void compute_bow_histogram(cv::Mat &sample, Histogram &feature_vector, cv::Ptr<cv::FeatureDetector> &detector, cv::SiftDescriptorExtractor &extractor, Quantization *quant){
    //detect keypoints
    std::vector<cv::KeyPoint> keypoints;
    detector->detect( sample, keypoints );

    //compute descriptor
    cv::Mat descriptor_uchar;
    extractor.compute(sample, keypoints, descriptor_uchar);

    cv::Mat descriptor_double;
    descriptor_uchar.convertTo(descriptor_double, CV_64F);

    //convert from mat to bag of unquantized features
    BagOfFeatures unquantized_features;
    convert_mat_to_vector(descriptor_double, unquantized_features);

    //quantize regions -- true BagOfFeatures
    quant->quantize(unquantized_features, feature_vector);
}
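
//--------------------------------------------------------------------------
// A minimal caller sketch for compute_bow_histogram, assuming OpenCV 2.4.x
// with the nonfree module built in. The image path is a placeholder, `quant`
// is the project-specific quantizer trained elsewhere, and Histogram is taken
// to be a std::vector<double>-like type (consistent with the range-for over
// doubles in train_category below).
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp>

void example_compute_bow(Quantization *quant){  // hypothetical helper
    cv::initModule_nonfree();  // registers SIFT with the feature factory
    cv::Ptr<cv::FeatureDetector> detector = cv::FeatureDetector::create("SIFT");
    cv::SiftDescriptorExtractor extractor;

    cv::Mat img = cv::imread("sample.jpg");  // placeholder path
    Histogram bow;  // filled with one bin per visual word
    compute_bow_histogram(img, bow, detector, extractor, quant);
}
//--------------------------------------------------------------------------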
Histogram compute_histogram(std::string imFile, cv::Ptr<cv::FeatureDetector> &detector, cv::Ptr<cv::FeatureDetector> &detector2, cv::SiftDescriptorExtractor &extractor, Quantization *quant) {
    cv::Mat img = cv::imread(imFile);

    //detect SIFT keypoints
    std::vector<cv::KeyPoint> keypoints;
    detector->detect( img, keypoints );

    //detect MSER keypoints
    std::vector<cv::KeyPoint> keypoints2;
    detector2->detect( img, keypoints2 );

    //group them together
    for(cv::KeyPoint& keypoint : keypoints2) {
        keypoints.push_back(keypoint);
    }

    std::cout << " - keypoint_ct: " << keypoints.size() << std::endl;

    //compute descriptors
    cv::Mat descriptor_uchar;
    extractor.compute(img, keypoints, descriptor_uchar);

    cv::Mat descriptor_double;
    descriptor_uchar.convertTo(descriptor_double, CV_64F);

    //convert from mat to bag of unquantized features
    BagOfFeatures unquantized_features;
    convert_mat_to_vector(descriptor_double, unquantized_features);

    //quantize to form bag of words
    Histogram bag_of_words;
    quant->quantize(unquantized_features, bag_of_words);

    //normalize -- no normalization is applied here; the raw visual-word counts are returned

    return bag_of_words;
}
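
//--------------------------------------------------------------------------
// A hedged sketch of driving compute_histogram with the SIFT + MSER detector
// pair its comments describe, assuming the OpenCV 2.4.x factory names
// ("SIFT", "MSER") and a previously trained Quantization object.
Histogram example_two_detector_histogram(const std::string &imFile, Quantization *quant){  // hypothetical helper
    cv::Ptr<cv::FeatureDetector> sift_detector = cv::FeatureDetector::create("SIFT");
    cv::Ptr<cv::FeatureDetector> mser_detector = cv::FeatureDetector::create("MSER");
    cv::SiftDescriptorExtractor extractor;
    return compute_histogram(imFile, sift_detector, mser_detector, extractor, quant);
}
//--------------------------------------------------------------------------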
//computes the centroid histogram for a category from its training images
void LocalDescriptorAndBagOfFeature::train_category(const std::vector<cv::Mat> &samples, Histogram &centroid, const cv::Ptr<cv::FeatureDetector> &detector, const cv::SiftDescriptorExtractor &extractor, Quantization *quant){
    clock_t start = clock();
    int i = 0;
    for(const cv::Mat& sample : samples){
        i++;
        std::cout << "converting img " << i << " of " << samples.size() << " to bag of features" << std::endl;

        //detect keypoints
        std::vector<cv::KeyPoint> keypoints;
        detector->detect( sample, keypoints );

        //compute descriptor
        cv::Mat descriptor_uchar;
        extractor.compute(sample, keypoints, descriptor_uchar);

        cv::Mat descriptor_double;
        descriptor_uchar.convertTo(descriptor_double, CV_64F);

        //convert from mat to bag of unquantized features
        BagOfFeatures unquantized_features;
        convert_mat_to_vector(descriptor_double, unquantized_features);

        //quantize regions -- true BagOfFeatures
        Histogram feature_vector;
        quant->quantize(unquantized_features, feature_vector);

        //aggregate
        vector_add(centroid, feature_vector);
    }

    //divide by training category size to compute centroid
    for(double& d : centroid){
        d = d/samples.size();
    }
    std::cout << double( clock() - start ) / (double)CLOCKS_PER_SEC << " seconds." << std::endl;
}
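
//--------------------------------------------------------------------------
// A hedged sketch of looping train_category over several categories, treating
// LocalDescriptorAndBagOfFeature as a namespace. load_category_images,
// category_names, and vocabulary_size are hypothetical stand-ins; the centroid
// is zero-initialized because train_category accumulates into it with
// vector_add before dividing by the sample count.
void example_train_all(const std::vector<std::string> &category_names,
                       cv::Ptr<cv::FeatureDetector> &detector,
                       cv::SiftDescriptorExtractor &extractor,
                       Quantization *quant, size_t vocabulary_size){
    std::vector<Histogram> centroids(category_names.size());
    for(size_t c = 0; c < category_names.size(); c++){
        std::vector<cv::Mat> samples = load_category_images(category_names[c]);  // hypothetical loader
        centroids[c] = Histogram(vocabulary_size, 0.0);  // assumes Histogram is std::vector<double>
        LocalDescriptorAndBagOfFeature::train_category(samples, centroids[c], detector, extractor, quant);
    }
}
//--------------------------------------------------------------------------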
Example #4
/* Feature extraction using the DoG kernel */
void SIFTExtractor::extract_using_dense()
{
    /*+ Parameter computation ++++++++++++++++*/
    Size image_size = Size(proc_image.cols, proc_image.rows); // get the image size

    // 1. Compute the sampling interval
    double interval = sqrt((image_size.width * image_size.height) / (double)feature_num);

    // 2. Set the scale (sampling interval / 2.0)
    double scale = interval / 2.0;

    // 3. Compute how many samples fit horizontally and vertically at that interval
    int sample_col_num = (double)image_size.width  / floor(interval);
    int sample_row_num = (double)image_size.height / floor(interval);

    // 4. Compute the image margins (leftover pixels)
    int odd_cols, odd_rows;

    if( (image_size.width % sample_col_num == 0) && (image_size.height % sample_row_num == 0) )
    {
        //cout << "1"  << endl;
        odd_cols = image_size.width -  ( (sample_col_num - 1)  * floor(interval) );
        odd_rows = image_size.height - ( (sample_row_num - 1) * floor(interval) );
    }
    else if( (image_size.width % sample_col_num == 0) && (image_size.height % sample_row_num != 0) )
    {
        //cout << "2"  << endl;
        odd_cols = image_size.width -  ( (sample_col_num - 1) * floor(interval) );
        odd_rows = image_size.height - ( sample_row_num * floor(interval) );
    }
    else if( (image_size.width % sample_col_num != 0) && (image_size.height % sample_row_num == 0) )
    {
        //cout << "3"  << endl;
        odd_cols = image_size.width -  ( sample_col_num * floor(interval) );
        odd_rows = image_size.height - ( (sample_row_num - 1) * floor(interval) );
    }
    else
    {
        //cout << "4"  << endl;
        odd_cols = image_size.width -  ( sample_col_num * floor(interval) );
        odd_rows = image_size.height - ( sample_row_num * floor(interval) );
    }

    // 5. Compute how many pixels to shift from the top-left before extracting features
    double sift = sqrt((odd_cols * odd_rows) / 4.0);

    // 6. Special-case handling for square images
    if( image_size.width == image_size.height)
    {
        int tmp = (double)image_size.width - (floor(sift) * 2.0) - (floor(interval) * (sqrt(feature_num) - 1.0));
        if(tmp == 0)
            sift--;
        else
            sift += tmp/2;
    }
    /*
    cout << "Size (" << proc_image.cols << " * " << proc_image.rows << ")" << endl;
    cout << "interval:" << interval << endl;
    cout << "scale:" << scale << endl;
    cout << "sample_col_num:" << sample_col_num << endl;
    cout << "sample_row_num:" << sample_row_num << endl;
    cout << "odd_cols:" << odd_cols << endl;
    cout << "odd_rows:" << odd_rows << endl;
    cout << "sift:" << sift << endl;
    */

    /*+ Parameter computation ++++++++++++++++*/
    // 1. Extract keypoints
    while(true)
    {
        cv::DenseFeatureDetector detector(floor(scale), 1, 0.1f, floor(interval), floor(sift), true, false);
        detector.detect(proc_image, keypoints);

        // feedback loop: adjust the shift until the requested number of features is reached
        if(keypoints.size() < feature_num)
        {
            sift--;
            if(sift < 0)
            {
                cerr << "the shift formula must be wrong" << endl;
                break;  // bail out instead of looping forever
            }
        }
        else if(keypoints.size() > feature_num)
        {
            sift++;
        }
        else
            break;

    }

    // 2. Extract SIFT descriptors
    extractor.compute(proc_image, keypoints, descriptors);

    //TODO: error handling

    // set the extraction-done flag
    extraction = true;
}
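
//--------------------------------------------------------------------------
// To make the dense-sampling arithmetic above concrete, a standalone sketch of
// steps 1-3 (interval, scale, grid size) for an illustrative 640x480 image and
// feature_num = 300; all names here are local to this sketch.
#include <cmath>
#include <cstdio>

int main(){
    const int width = 640, height = 480;  // illustrative image size
    const int feature_num = 300;          // requested number of dense samples

    // step 1: interval so that roughly feature_num samples tile the image
    double interval = std::sqrt((width * height) / (double)feature_num);  // sqrt(1024) = 32
    // step 2: the SIFT scale is half the sampling interval
    double scale = interval / 2.0;                                        // 16
    // step 3: samples per row/column at that interval
    int cols = (int)(width  / std::floor(interval));                      // 640/32 = 20
    int rows = (int)(height / std::floor(interval));                      // 480/32 = 15

    std::printf("interval=%.1f scale=%.1f grid=%dx%d (%d samples)\n",
                interval, scale, cols, rows, cols * rows);                // 20*15 = 300
    return 0;
}
//--------------------------------------------------------------------------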