void AKAZE::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
{
    cv::Mat img = image.getMat();
    if (img.type() != CV_8UC1)
        cvtColor(image, img, COLOR_BGR2GRAY);

    // AKAZE operates on a normalized float image
    Mat img1_32;
    img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);

    cv::Mat& desc = descriptors.getMatRef();

    AKAZEOptions options;
    options.descriptor = static_cast<DESCRIPTOR_TYPE>(descriptor);
    options.descriptor_channels = descriptor_channels;
    options.descriptor_size = descriptor_size;
    options.nsublevels = nsublevels;
    options.dthreshold = dthreshold;
    options.img_width = img.cols;
    options.img_height = img.rows;

    AKAZEFeatures impl(options);
    impl.Create_Nonlinear_Scale_Space(img1_32);
    impl.Compute_Descriptors(keypoints, desc);

    CV_Assert(!desc.rows || desc.cols == descriptorSize());
    CV_Assert(!desc.rows || (desc.type() == descriptorType()));
}
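// Usage sketch for the compute path above. It assumes this AKAZE wrapper is
// plugged into the usual Feature2D interface, so that detect() and compute()
// (which dispatches to computeImpl()) are available, and that the features2d
// headers are already included; the default-constructed options are
// illustrative, not fixed by this file.
static void akaze_usage_example(const cv::Mat& grayImage)
{
    cv::AKAZE akaze;                                  // default descriptor settings
    std::vector<cv::KeyPoint> keypoints;
    cv::Mat descriptors;

    akaze.detect(grayImage, keypoints);               // detector half of Feature2D
    akaze.compute(grayImage, keypoints, descriptors); // ends in computeImpl() above

    // The same invariant computeImpl() asserts: one fixed-size row per keypoint.
    CV_Assert(descriptors.rows == (int)keypoints.size());
}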
void BOWImgDescriptorExtractor::compute( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& imgDescriptor,
                                         std::vector<std::vector<int> >* pointIdxsOfClusters, Mat* _descriptors )
{
    imgDescriptor.release();

    if( keypoints.empty() )
        return;

    int clusterCount = descriptorSize(); // = vocabulary.rows

    // Compute descriptors for the image.
    Mat descriptors;
    dextractor->compute( image, keypoints, descriptors );

    // Match keypoint descriptors to cluster centers (to the vocabulary).
    std::vector<DMatch> matches;
    dmatcher->match( descriptors, matches );

    // Compute image descriptor
    if( pointIdxsOfClusters )
    {
        pointIdxsOfClusters->clear();
        pointIdxsOfClusters->resize(clusterCount);
    }

    imgDescriptor = Mat( 1, clusterCount, descriptorType(), Scalar::all(0.0) );
    float *dptr = (float*)imgDescriptor.data;
    for( size_t i = 0; i < matches.size(); i++ )
    {
        int queryIdx = matches[i].queryIdx;
        int trainIdx = matches[i].trainIdx; // cluster index
        CV_Assert( queryIdx == (int)i );

        dptr[trainIdx] = dptr[trainIdx] + 1.f;
        if( pointIdxsOfClusters )
            (*pointIdxsOfClusters)[trainIdx].push_back( queryIdx );
    }

    // Normalize image descriptor.
    imgDescriptor /= descriptors.rows;

    // Add the descriptors of image keypoints
    if (_descriptors) {
        *_descriptors = descriptors.clone();
    }
}
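// Usage sketch for the bag-of-words path above: pool training descriptors,
// cluster them into a vocabulary with BOWKMeansTrainer, then let compute()
// produce the normalized 1 x clusterCount histogram. The KAZE detector /
// extractor pair (chosen because k-means clustering needs CV_32F descriptors,
// and assuming the cv::KAZE wrapper in this module derives from Feature2D)
// and the 100-word vocabulary size are illustrative assumptions.
static cv::Mat bow_usage_example(const std::vector<cv::Mat>& trainImages, const cv::Mat& queryImage)
{
    cv::Ptr<cv::FeatureDetector> detector(new cv::KAZE());
    cv::Ptr<cv::DescriptorExtractor> extractor(new cv::KAZE());
    cv::Ptr<cv::DescriptorMatcher> matcher = cv::DescriptorMatcher::create("BruteForce");

    // 1. Cluster pooled training descriptors into the visual vocabulary.
    cv::BOWKMeansTrainer trainer(100); // 100 visual words (arbitrary)
    for (size_t i = 0; i < trainImages.size(); i++)
    {
        std::vector<cv::KeyPoint> kp;
        cv::Mat desc;
        detector->detect(trainImages[i], kp);
        extractor->compute(trainImages[i], kp, desc);
        if (!desc.empty())
            trainer.add(desc);
    }

    // 2. Bind the vocabulary and compute the query image's word histogram.
    cv::BOWImgDescriptorExtractor bow(extractor, matcher);
    bow.setVocabulary(trainer.cluster());

    std::vector<cv::KeyPoint> kp;
    detector->detect(queryImage, kp);
    cv::Mat imgDescriptor; // 1 x 100, CV_32F, normalized by keypoint count
    bow.compute(queryImage, kp, imgDescriptor);
    return imgDescriptor;
}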
void KAZE::operator()(InputArray _image, InputArray _mask, vector<KeyPoint>& _keypoints,
                      OutputArray _descriptors, bool useProvidedKeypoints) const
{
    bool do_keypoints = !useProvidedKeypoints;
    bool do_descriptors = _descriptors.needed();

    if( (!do_keypoints && !do_descriptors) || _image.empty() )
        return;

    cv::Mat img1_8, img1_32;

    // Convert to gray scale image and float image
    if (_image.getMat().channels() == 3)
        cv::cvtColor(_image, img1_8, COLOR_BGR2GRAY);
    else
        _image.getMat().copyTo(img1_8);

    img1_8.convertTo(img1_32, CV_32F, 1.0/255.0, 0);

    // Construct KAZE
    toptions opt = options;
    opt.img_width = img1_32.cols;
    opt.img_height = img1_32.rows;

    ::KAZE kazeEvolution(opt);

    // Create nonlinear scale space
    kazeEvolution.Create_Nonlinear_Scale_Space(img1_32);

    // Feature detection
    std::vector<Ipoint> kazePoints;

    if (do_keypoints)
    {
        kazeEvolution.Feature_Detection(kazePoints);
        filterDuplicated(kazePoints);

        if (!_mask.empty())
        {
            filterByPixelsMask(kazePoints, _mask.getMat());
        }

        if (opt.nfeatures > 0)
        {
            filterRetainBest(kazePoints, opt.nfeatures);
        }
    }
    else
    {
        kazePoints.resize(_keypoints.size());

        #pragma omp parallel for
        for (int i = 0; i < (int)kazePoints.size(); i++)
        {
            convertPoint(_keypoints[i], kazePoints[i]);
        }
    }

    // Descriptor calculation
    if (do_descriptors)
    {
        kazeEvolution.Feature_Description(kazePoints);

        cv::Mat& descriptors = _descriptors.getMatRef();
        descriptors.create((int)kazePoints.size(), descriptorSize(), descriptorType());

        for (size_t i = 0; i < kazePoints.size(); i++)
        {
            std::copy(kazePoints[i].descriptor.begin(), kazePoints[i].descriptor.end(),
                      (float*)descriptors.row((int)i).data);
        }
    }

    // Transfer from KAZE::Ipoint to cv::KeyPoint
    if (do_keypoints)
    {
        _keypoints.resize(kazePoints.size());

        #pragma omp parallel for
        for (int i = 0; i < (int)kazePoints.size(); i++)
        {
            convertPoint(kazePoints[i], _keypoints[i]);
        }
    }
}
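// Usage sketch for the KAZE wrapper above: a single operator() call builds the
// nonlinear scale space, detects features, and computes descriptors. Default
// options and the brute-force matcher choice are illustrative assumptions.
static void kaze_usage_example(const cv::Mat& img1, const cv::Mat& img2)
{
    cv::KAZE kaze;
    std::vector<cv::KeyPoint> kp1, kp2;
    cv::Mat desc1, desc2;

    kaze(img1, cv::noArray(), kp1, desc1); // detect + describe, no mask
    kaze(img2, cv::noArray(), kp2, desc2);

    // KAZE descriptors are CV_32F, so L2 brute-force matching is appropriate.
    cv::BFMatcher matcher(cv::NORM_L2);
    std::vector<cv::DMatch> matches;
    matcher.match(desc1, desc2, matches);
}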