void GpuSurfDetectorInternal::getDescriptors(std::vector<float> & outDescriptors)
  {
    int ftcount = m_features.ftCount();
    
    m_features.descriptorsMem().pullFromDevice();
    cudaThreadSynchronize(); // block until the device-to-host copy has finished

    // Resize the destination buffer.
    outDescriptors.resize(descriptorSize() * ftcount);
    // Copy the descriptors into the buffer. std::vector storage is guaranteed
    // to be contiguous (C++03 and later). Skip the copy when no features were
    // detected, since indexing an empty vector is undefined behavior.
    if (ftcount > 0)
      memcpy(&outDescriptors[0], m_features.hostDescriptors(), descriptorSize() * ftcount * sizeof(float));
  }
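
The descriptors come back as a single flat, row-major buffer of ftCount() * descriptorSize() floats. A minimal caller-side sketch of indexing it (the detector variable and the 64-element SURF descriptor length are assumptions for illustration, not part of the snippet):

// Hypothetical caller; assumes detector exposes getDescriptors() as above
// and that descriptorSize() is 64 (standard SURF). Both are assumptions.
std::vector<float> descriptors;
detector.getDescriptors(descriptors);

const int descLen = 64; // assumed per-feature descriptor length
const int numFeatures = static_cast<int>(descriptors.size()) / descLen;
for (int i = 0; i < numFeatures; ++i)
{
    const float* d = &descriptors[i * descLen]; // descriptor of feature i
    // ... consume d[0 .. descLen - 1] ...
}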
Example #2
    void AKAZE::computeImpl(InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors) const
    {
        cv::Mat img = image.getMat();
        if (img.type() != CV_8UC1)
            cvtColor(image, img, COLOR_BGR2GRAY);

        Mat img1_32;
        img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);

        cv::Mat& desc = descriptors.getMatRef();

        // Populate the AKAZE options from the algorithm's parameters.
        AKAZEOptions options;
        options.descriptor = static_cast<DESCRIPTOR_TYPE>(descriptor);
        options.descriptor_channels = descriptor_channels;
        options.descriptor_size = descriptor_size;
        options.nsublevels = nsublevels;
        options.dthreshold = dtreshhold;
        options.img_width = img.cols;
        options.img_height = img.rows;

        AKAZEFeatures impl(options);
        impl.Create_Nonlinear_Scale_Space(img1_32);
        impl.Compute_Descriptors(keypoints, desc);

        CV_Assert((!desc.rows || desc.cols == descriptorSize()));
        CV_Assert((!desc.rows || (desc.type() == descriptorType())));
    }
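
For context, user code would normally reach this through the public Feature2D interface rather than calling computeImpl directly. A minimal sketch, assuming the OpenCV 3.x factory API (the snippet itself is written against the older 2.4-style internals):

#include <opencv2/imgcodecs.hpp>
#include <opencv2/features2d.hpp>

// Hypothetical usage; "image.png" is a placeholder path.
cv::Mat gray = cv::imread("image.png", cv::IMREAD_GRAYSCALE);
cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();
std::vector<cv::KeyPoint> keypoints;
cv::Mat descriptors;
akaze->detectAndCompute(gray, cv::noArray(), keypoints, descriptors);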
Example #3
template <typename PointInT, typename PointNT, typename PointOutT> void
pcl::GFPFHEstimation<PointInT, PointNT, PointOutT>::computeDistanceHistogram (const std::vector<float>& distances,
        std::vector<float>& histogram)
{
    std::vector<float>::const_iterator min_it = std::min_element (distances.begin (), distances.end ());
    assert (min_it != distances.end ());
    const float min_value = *min_it;

    std::vector<float>::const_iterator max_it = std::max_element (distances.begin (), distances.end ());
    assert (max_it != distances.end());
    const float max_value = *max_it;

    histogram.resize (descriptorSize (), 0);

    // Guard against a zero range (all distances equal), which would otherwise
    // divide by zero below.
    const float range = std::max (max_value - min_value, std::numeric_limits<float>::epsilon ());
    const int max_bin = descriptorSize () - 1;
    for (size_t i = 0; i < distances.size (); ++i)
    {
        // Map each distance into [0, descriptorSize()); std::min clamps the
        // maximum distance (raw_bin == descriptorSize()) into the last bin.
        const float raw_bin = static_cast<float> (descriptorSize ()) * (distances[i] - min_value) / range;
        int bin = std::min (max_bin, static_cast<int> (floor (raw_bin)));
        histogram[bin] += 1;
    }
}
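
A standalone toy check of the edge case that the std::min above handles: the largest distance maps to raw_bin == descriptorSize(), which must be clamped into the last bin. The 16-bin size here is an assumption for illustration:

#include <algorithm>
#include <cassert>
#include <cmath>

int main ()
{
    const int bins = 16;                   // assumed descriptorSize()
    const float min_v = 0.f, max_v = 10.f;
    const float range = max_v - min_v;
    // The maximum distance yields raw_bin == bins ...
    const float raw_bin = static_cast<float> (bins) * (max_v - min_v) / range;
    // ... so the clamp against bins - 1 is what keeps the index in range.
    const int bin = std::min (bins - 1, static_cast<int> (std::floor (raw_bin)));
    assert (bin == 15);
    return 0;
}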
Example #4
void BOWImgDescriptorExtractor::compute( const Mat& image, std::vector<KeyPoint>& keypoints, Mat& imgDescriptor,
                                         std::vector<std::vector<int> >* pointIdxsOfClusters, Mat* _descriptors )
{
    imgDescriptor.release();

    if( keypoints.empty() )
        return;

    int clusterCount = descriptorSize(); // = vocabulary.rows

    // Compute descriptors for the image.
    Mat descriptors;
    dextractor->compute( image, keypoints, descriptors );

    // Match each keypoint descriptor to its nearest cluster center (vocabulary word)
    std::vector<DMatch> matches;
    dmatcher->match( descriptors, matches );

    // Compute image descriptor
    if( pointIdxsOfClusters )
    {
        pointIdxsOfClusters->clear();
        pointIdxsOfClusters->resize(clusterCount);
    }

    imgDescriptor = Mat( 1, clusterCount, descriptorType(), Scalar::all(0.0) );
    float *dptr = imgDescriptor.ptr<float>();
    for( size_t i = 0; i < matches.size(); i++ )
    {
        int queryIdx = matches[i].queryIdx;
        int trainIdx = matches[i].trainIdx; // cluster index
        CV_Assert( queryIdx == (int)i );

        dptr[trainIdx] += 1.f;
        if( pointIdxsOfClusters )
            (*pointIdxsOfClusters)[trainIdx].push_back( queryIdx );
    }

    // Normalize image descriptor.
    imgDescriptor /= descriptors.rows;

    // Add the descriptors of image keypoints
    if (_descriptors) {
        *_descriptors = descriptors.clone();
    }
}
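
A minimal usage sketch for this method (hypothetical setup; assumes OpenCV 2.4-style factory names and a vocabulary Mat precomputed with something like BOWKMeansTrainer):

// Hypothetical usage; vocabulary (clusterCount x descriptorDim, CV_32F),
// image, and keypoints are assumed to exist already.
cv::Ptr<cv::DescriptorExtractor> extractor = cv::DescriptorExtractor::create("SIFT");
cv::Ptr<cv::DescriptorMatcher>   matcher   = cv::DescriptorMatcher::create("FlannBased");
cv::BOWImgDescriptorExtractor bowExtractor(extractor, matcher);
bowExtractor.setVocabulary(vocabulary);

cv::Mat imgDescriptor; // comes back as a 1 x clusterCount normalized histogram
bowExtractor.compute(image, keypoints, imgDescriptor);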
Example #5
void SIFT::operator()(InputArray _image, InputArray _mask,
                      vector<KeyPoint>& keypoints,
                      OutputArray _descriptors,
                      bool useProvidedKeypoints) const
{
    int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0;
    Mat image = _image.getMat(), mask = _mask.getMat();

    if( image.empty() || image.depth() != CV_8U )
        CV_Error( CV_StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );

    if( !mask.empty() && mask.type() != CV_8UC1 )
        CV_Error( CV_StsBadArg, "mask has incorrect type (!=CV_8UC1)" );

    if( useProvidedKeypoints )
    {
        firstOctave = 0;
        int maxOctave = INT_MIN;
        for( size_t i = 0; i < keypoints.size(); i++ )
        {
            int octave, layer;
            float scale;
            unpackOctave(keypoints[i], octave, layer, scale);
            firstOctave = std::min(firstOctave, octave);
            maxOctave = std::max(maxOctave, octave);
            actualNLayers = std::max(actualNLayers, layer-2);
        }

        firstOctave = std::min(firstOctave, 0);
        CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers );
        actualNOctaves = maxOctave - firstOctave + 1;
    }

    Mat base = createInitialImage(image, firstOctave < 0, (float)sigma);
    vector<Mat> gpyr, dogpyr;
    int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(log( (double)std::min( base.cols, base.rows ) ) / log(2.) - 2) - firstOctave;

    //double t, tf = getTickFrequency();
    //t = (double)getTickCount();
    buildGaussianPyramid(base, gpyr, nOctaves);
    buildDoGPyramid(gpyr, dogpyr);

    //t = (double)getTickCount() - t;
    //printf("pyramid construction time: %g\n", t*1000./tf);

    if( !useProvidedKeypoints )
    {
        //t = (double)getTickCount();
        findScaleSpaceExtrema(gpyr, dogpyr, keypoints);
        KeyPointsFilter::removeDuplicated( keypoints );

        if( nfeatures > 0 )
            KeyPointsFilter::retainBest(keypoints, nfeatures);
        //t = (double)getTickCount() - t;
        //printf("keypoint detection time: %g\n", t*1000./tf);

        if( firstOctave < 0 )
            for( size_t i = 0; i < keypoints.size(); i++ )
            {
                KeyPoint& kpt = keypoints[i];
                float scale = 1.f/(float)(1 << -firstOctave);
                kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255);
                kpt.pt *= scale;
                kpt.size *= scale;
            }

        if( !mask.empty() )
            KeyPointsFilter::runByPixelsMask( keypoints, mask );
    }
    else
    {
        // filter keypoints by mask
        //KeyPointsFilter::runByPixelsMask( keypoints, mask );
    }

    if( _descriptors.needed() )
    {
        //t = (double)getTickCount();
        int dsize = descriptorSize();
        _descriptors.create((int)keypoints.size(), dsize, CV_32F);
        Mat descriptors = _descriptors.getMat();

        calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave);
        //t = (double)getTickCount() - t;
        //printf("descriptor extraction time: %g\n", t*1000./tf);
    }
}
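
A minimal call sketch (assuming OpenCV 2.4's nonfree module, where SIFT is a class exposing this operator()):

// Hypothetical usage; image is assumed to be a CV_8U Mat.
cv::SIFT sift; // default parameters
std::vector<cv::KeyPoint> keypoints;
cv::Mat descriptors;
sift(image, cv::noArray(), keypoints, descriptors, false); // detect, then describe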
Example #6
    void KAZE::operator()(InputArray _image, InputArray _mask, vector<KeyPoint>& _keypoints,
        OutputArray _descriptors, bool useProvidedKeypoints) const
    {

        bool do_keypoints = !useProvidedKeypoints;
        bool do_descriptors = _descriptors.needed();

        if( (!do_keypoints && !do_descriptors) || _image.empty() )
            return;
        
        cv::Mat img1_8, img1_32;

        // Convert to a grayscale image, then to a float image in [0, 1]
        if (_image.getMat().channels() == 3)
            cv::cvtColor(_image, img1_8, CV_RGB2GRAY);
        else
            _image.getMat().copyTo(img1_8);

        img1_8.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);

        // Construct KAZE
        toptions opt = options;
        opt.img_width = img1_32.cols;
        opt.img_height = img1_32.rows;

        ::KAZE kazeEvolution(opt);

        // Create nonlinear scale space
        kazeEvolution.Create_Nonlinear_Scale_Space(img1_32);        

        // Feature detection
        std::vector<Ipoint> kazePoints;

        if (do_keypoints)
        {
            kazeEvolution.Feature_Detection(kazePoints);
            filterDuplicated(kazePoints);

            if (!_mask.empty())
            {
                filterByPixelsMask(kazePoints, _mask.getMat());
            }

            if (opt.nfeatures > 0)
            {
                filterRetainBest(kazePoints, opt.nfeatures);
            }
            
        }
        else
        {
            kazePoints.resize(_keypoints.size());

            #pragma omp parallel for
            for (int i = 0; i < (int)kazePoints.size(); i++)
            {
                convertPoint(_keypoints[i], kazePoints[i]);
            }
        }
        
        // Descriptor calculation
        if (do_descriptors)
        {
            kazeEvolution.Feature_Description(kazePoints);

            cv::Mat& descriptors = _descriptors.getMatRef();
            descriptors.create((int)kazePoints.size(), descriptorSize(), descriptorType());

            for (size_t i = 0; i < kazePoints.size(); i++)
            {
                std::copy(kazePoints[i].descriptor.begin(), kazePoints[i].descriptor.end(), descriptors.ptr<float>((int)i));
            }
        }

        // Transfer from KAZE::Ipoint to cv::KeyPoint
        if (do_keypoints)
        {
            _keypoints.resize(kazePoints.size());

            #pragma omp parallel for
            for (int i = 0; i < (int)kazePoints.size(); i++)
            {
                convertPoint(kazePoints[i], _keypoints[i]);
            }
        }
        
    }
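
As with the AKAZE example, a direct call would look like the following; a sketch under the assumption that this wrapper lives in namespace cv and is default-constructible (its constructor is not shown in the snippet):

// Hypothetical usage of the wrapper's operator().
cv::KAZE kaze; // assumes a default constructor that fills the options member
std::vector<cv::KeyPoint> keypoints;
cv::Mat descriptors;
kaze(image, cv::noArray(), keypoints, descriptors, false);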
Example #7
void LDB::compute( const cv::Mat& image,
                   std::vector<cv::KeyPoint>& _keypoints,
                   cv::Mat& _descriptors,
				   bool flag) const
{
    // Added to work with color images.
    cv::Mat _image;
    image.copyTo(_image);

	if(_image.empty() )
		return;

	// ROI handling
	int halfPatchSize = patchSize / 2;
	int border = halfPatchSize * 1.415 + 1; // 1.415 ~ sqrt(2), leaves room for rotated patches
	if( _image.type() != CV_8UC1 )
		cvtColor(_image, _image, CV_BGR2GRAY);
	int levelsNum = 0;
	for( size_t i = 0; i < _keypoints.size(); i++ )
		levelsNum = std::max(levelsNum, std::max(_keypoints[i].octave, 0));
	levelsNum++;
	
	// Compute orientation
	if (flag)
	{
		computeOrientation(_image, _keypoints, halfPatchSize);
	}

	// Pre-compute the scale pyramids
    std::vector<cv::Mat> imagePyramid(levelsNum);
	for (int level = 0; level < levelsNum; ++level)
	{
		float scale = 1/getScale(level, firstLevel, scaleFactor);
        cv::Size sz(cvRound(_image.cols*scale), cvRound(_image.rows*scale));
        cv::Size wholeSize(sz.width + border*2, sz.height + border*2);
        cv::Mat temp(wholeSize, _image.type()), masktemp;
        imagePyramid[level] = temp(cv::Rect(border, border, sz.width, sz.height));

		// Compute the resized image
		if( level != firstLevel )
		{
			if( level < firstLevel )
                resize(_image, imagePyramid[level], sz, 0, 0, cv::INTER_LINEAR);
			else
                resize(imagePyramid[level-1], imagePyramid[level], sz, 0, 0, cv::INTER_LINEAR);

            cv::copyMakeBorder(imagePyramid[level], temp, border, border, border, border,
                cv::BORDER_REFLECT_101+cv::BORDER_ISOLATED);
		}
		else
            cv::copyMakeBorder(_image, temp, border, border, border, border,
                cv::BORDER_REFLECT_101);
	}

	// Pre-compute the keypoints (we keep the best over all scales, so this has to be done beforehand)
    std::vector < std::vector<cv::KeyPoint> > allKeypoints;

	// Cluster the input keypoints depending on the level they were computed at
	allKeypoints.resize(levelsNum);
    for (std::vector<cv::KeyPoint>::iterator keypoint = _keypoints.begin(),
		keypointEnd = _keypoints.end(); keypoint != keypointEnd; ++keypoint)
		allKeypoints[keypoint->octave].push_back(*keypoint);

	// Make sure we rescale the coordinates
	for (int level = 0; level < levelsNum; ++level)
	{
		if (level == firstLevel)
			continue;

        std::vector<cv::KeyPoint> & keypoints = allKeypoints[level];
		float scale = 1/getScale(level, firstLevel, scaleFactor);
        for (std::vector<cv::KeyPoint>::iterator keypoint = keypoints.begin(),
			keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint)
			keypoint->pt *= scale;
	}

    cv::Mat descriptors;
    std::vector<cv::Point> pattern;

	int nkeypoints = 0;
	for (int level = 0; level < levelsNum; ++level){
        std::vector<cv::KeyPoint>& keypoints = allKeypoints[level];
        cv::Mat& workingmat = imagePyramid[level];
		if(keypoints.size() > 1)
            cv::KeyPointsFilter::runByImageBorder(keypoints, workingmat.size(), border);

		nkeypoints += keypoints.size();
	}
	if( nkeypoints == 0 )
		_descriptors.release();
	else
	{
		_descriptors.create(nkeypoints, descriptorSize(), CV_8U);
		descriptors = _descriptors;
	}

	_keypoints.clear();
	int offset = 0;
	for (int level = 0; level < levelsNum; ++level)
	{
		// preprocess the resized image
        cv::Mat& workingmat = imagePyramid[level];
		// Get the features and compute their orientation
        std::vector<cv::KeyPoint>& keypoints = allKeypoints[level];
		if(keypoints.size() > 1)
            cv::KeyPointsFilter::runByImageBorder(keypoints, workingmat.size(), border);
		int nkeypoints = (int)keypoints.size();

		// Compute the descriptors
        cv::Mat desc;
		if (!descriptors.empty())
		{
			desc = descriptors.rowRange(offset, offset + nkeypoints);
		}

		offset += nkeypoints;
        //boxFilter(workingmat, workingmat, workingmat.depth(), cv::Size(5,5), cv::Point(-1,-1), true, cv::BORDER_REFLECT_101);
        GaussianBlur(workingmat, workingmat, cv::Size(7, 7), 2, 2, cv::BORDER_REFLECT_101);
        cv::Mat integral_image;
        integral(workingmat, integral_image, CV_32S);
        computeDescriptors(workingmat, integral_image, patchSize, keypoints, desc, descriptorSize(), flag);

		// Copy to the output data
		if (level != firstLevel)
		{
			float scale = getScale(level, firstLevel, scaleFactor);
            for (std::vector<cv::KeyPoint>::iterator keypoint = keypoints.begin(),
				keypointEnd = keypoints.end(); keypoint != keypointEnd; ++keypoint)
				keypoint->pt *= scale;
		}
		// And add the keypoints to the output
		_keypoints.insert(_keypoints.end(), keypoints.begin(), keypoints.end());
	}
	
}
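
A hypothetical call sketch for the extractor above (LDB's constructor is not shown, and the meaning of flag is inferred from the code: it toggles the orientation computation):

// Hypothetical usage; assumes LDB is default-constructible.
LDB ldb;
std::vector<cv::KeyPoint> keypoints; // assumed filled by a detector, with octave set per pyramid level
cv::Mat descriptors;                 // comes back as nkeypoints x descriptorSize(), CV_8U
ldb.compute(image, keypoints, descriptors, true); // true: compute orientations first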