Code example #1
File: ImageData.cpp Project: caomw/RoboStruct
void ImageData::SaveDescriptors(bool clear)
{
    if (m_descriptors.empty()) return;

    util::SaveContainerToFileBinary(m_filename + ".desc", m_descriptors);

    if (clear) ClearDescriptors();
}
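ClearDescriptors() itself is not shown in this snippet. Assuming m_descriptors is a std::vector<float> (as the DetectFeatures example below suggests), a minimal sketch that also releases the allocated memory could look like this; the real RoboStruct implementation may differ:

void ImageData::ClearDescriptors()
{
    // Swapping with an empty vector releases the capacity as well,
    // whereas clear() alone would keep the allocation alive.
    std::vector<float>().swap(m_descriptors);
}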
Code example #2
File: plSDLMgr.cpp Project: NadnerbD/libhsplasma
void plSDLMgr::read(hsStream* S) {
    ClearDescriptors();
    // Read the descriptor count, then each state descriptor
    fDescriptors.resize(S->readShort());
    for (size_t i=0; i<fDescriptors.size(); i++) {
        fDescriptors[i] = new plStateDescriptor();
        fDescriptors[i]->read(S);
    }

    // Propagate types on SDVars
    for (size_t i=0; i<fDescriptors.size(); i++) {
        plStateDescriptor* desc = fDescriptors[i];
        for (size_t j=0; j<desc->getNumVars(); j++) {
            plVarDescriptor* var = desc->get(j);
            if (var->getType() == plVarDescriptor::kStateDescriptor)
                var->setStateDesc(GetDescriptor(var->getStateDescType(),
                                                var->getStateDescVer()));
        }
    }
}
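Since read() allocates every plStateDescriptor with new, the ClearDescriptors() call at the top presumably frees the descriptors loaded by a previous read before the vector is refilled. A hedged sketch of such a cleanup (the actual libhsplasma implementation is not shown here):

void plSDLMgr::ClearDescriptors() {
    // Free each heap-allocated descriptor, then drop the stale pointers.
    for (size_t i=0; i<fDescriptors.size(); i++)
        delete fDescriptors[i];
    fDescriptors.clear();
}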
Code example #3
File: ImageData.cpp Project: caomw/RoboStruct
void ImageData::DetectFeatures(const Options& opts)
{
    // Clean up data structures
    ClearDescriptors();
    m_keys.clear();

    std::vector<cv::KeyPoint> keys;
    const auto img = cv::imread(m_filename);
    cv::Mat grey_img;
    cv::cvtColor(img, grey_img, CV_BGR2GRAY);

    switch (opts.feature_type)
    {
    case 0: // Detect YAPE/Daisy features
        {
            cv::YAPE YapeDet{opts.yape_radius,
                             opts.yape_threshold,
                             opts.yape_octaves,
                             opts.yape_views,
                             opts.yape_base_feature_size,
                             opts.yape_clustering_distance};
            YapeDet(grey_img, keys);

            daisy DaisyDesc{};
            DaisyDesc.set_image(grey_img.data, m_height, m_width);
            DaisyDesc.set_parameters(opts.daisy_radius,
                                     opts.daisy_radius_quantization,
                                     opts.daisy_angular_quantization,
                                     opts.daisy_histogram_quantization);
            DaisyDesc.verbose(0);
            DaisyDesc.initialize_single_descriptor_mode();

            m_desc_size = DaisyDesc.descriptor_size();
            // m_keys was cleared above, so size the reserve on the freshly detected keypoints
            m_descriptors.reserve(keys.size() * m_desc_size);

            // Compute Daisy descriptors at provided locations
            for (const auto& key : keys)
            {
                std::vector<float> descriptor(m_desc_size);
                DaisyDesc.get_descriptor(key.pt.y, key.pt.x, 35, descriptor.data());
                m_descriptors.insert(m_descriptors.end(), descriptor.begin(), descriptor.end());
            }

            break;
        }
    case 1: // Detect SURF features
        {
            if (opts.surf_desc_extended)    m_desc_size = 128;
            else                            m_desc_size = 64;

            cv::SURF Surf{opts.surf_det_hessian_threshold,
                          opts.surf_common_octaves,
                          opts.surf_common_octave_layers,
                          opts.surf_desc_extended,
                          opts.surf_desc_upright};
            Surf(grey_img, cv::Mat{}, keys, m_descriptors);

            break;
        }
    case 2: // Detect AKAZE features
        {
            m_desc_size = opts.akaze_descriptor_size;
            cv::Mat working_img;
            grey_img.convertTo(working_img, CV_32F, 1.0 / 255.0, 0);

            AKAZEOptions options;
            options.img_width               = grey_img.cols;
            options.img_height              = grey_img.rows;
            options.soffset                 = DEFAULT_SCALE_OFFSET;
            options.omin                    = DEFAULT_OCTAVE_MIN;
            options.omax                    = DEFAULT_OCTAVE_MAX;
            options.nsublevels              = DEFAULT_NSUBLEVELS;
            options.dthreshold              = opts.akaze_threshold;
            options.diffusivity             = DEFAULT_DIFFUSIVITY_TYPE;
            options.descriptor              = DEFAULT_DESCRIPTOR;
            options.descriptor_size         = opts.akaze_descriptor_size;
            options.descriptor_channels     = DEFAULT_LDB_CHANNELS;
            options.descriptor_pattern_size = DEFAULT_LDB_PATTERN_SIZE;
            options.sderivatives            = DEFAULT_SIGMA_SMOOTHING_DERIVATIVES;
            options.save_scale_space        = DEFAULT_SAVE_SCALE_SPACE;
            options.save_keypoints          = DEFAULT_SAVE_KEYPOINTS;
            options.verbosity               = DEFAULT_VERBOSITY;

            AKAZE akaze{options};
            akaze.Create_Nonlinear_Scale_Space(working_img);
            akaze.Feature_Detection(keys);
            akaze.Compute_Descriptors(keys, m_descriptors_akaze);

            break;
        }
    }

    // Convert the keypoints from OpenCV's format to the internal representation
    m_keys.reserve(keys.size());

    const float x_correction_factor = 0.5 * m_width;
    const float y_correction_factor = 0.5 * m_height - 1.0;

    for (const auto& key : keys)
    {
        const float x = key.pt.x - x_correction_factor;
        const float y = y_correction_factor - key.pt.y;

        const int xf = static_cast<int>(std::floor(key.pt.x));
        const int yf = static_cast<int>(std::floor(key.pt.y));
        const uchar* ptr = img.ptr<uchar>(yf);

        m_keys.push_back(KeyPoint{x, y, ptr[3 * xf + 2], ptr[3 * xf + 1], ptr[3 * xf]});
    }

    SaveDescriptors(true);
}
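Both SaveDescriptors(true) above and Example #1 delegate the on-disk serialization to util::SaveContainerToFileBinary. That helper is not part of these snippets; a minimal sketch for a contiguous container of trivially copyable elements (element count followed by the raw bytes, with the exact format being an assumption) might look like this:

#include <cstdint>
#include <fstream>
#include <string>

namespace util {

template <typename Container>
void SaveContainerToFileBinary(const std::string& filename, const Container& data)
{
    // Write the element count, then the raw bytes of the contiguous storage.
    std::ofstream out(filename, std::ios::binary);
    const std::uint64_t count = data.size();
    out.write(reinterpret_cast<const char*>(&count), sizeof(count));
    out.write(reinterpret_cast<const char*>(data.data()),
              static_cast<std::streamsize>(count * sizeof(typename Container::value_type)));
}

} // namespace util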