Example #1
void AffineAdaptedFeature2D::operator()(InputArray _image, InputArray _mask,
                                        vector<KeyPoint>& _keypoints,
                                        OutputArray _descriptors,
                                        bool useProvidedKeypoints) const
{
    Mat image = _image.getMat();
    Mat mask = _mask.getMat();

    CV_Assert(useProvidedKeypoints == false);
    CV_Assert(!affineTransformParams.empty());

    vector<vector<KeyPoint> > keypoints(affineTransformParams.size());
    vector<Mat> descriptors(affineTransformParams.size());

    // Process each simulated affine view in parallel. Note: an unsigned loop
    // index requires OpenMP 3.0+; MSVC's OpenMP 2.0 needs a signed index instead.
#pragma omp parallel for
    for(size_t paramsIndex = 0; paramsIndex < affineTransformParams.size(); paramsIndex++)
    {
        const Vec2f& params = affineTransformParams[paramsIndex];
        const float tilt = params[0]; // simulated camera tilt
        const float phi = params[1];  // simulated in-plane rotation angle

        if(tilt == 1.f) // no tilt: detect on the original image
        {
            detectAndComputeImpl(image, mask, keypoints[paramsIndex], descriptors[paramsIndex]);
        }
        else
        {
            Mat transformedImage, transformedMask;
            Mat Ainv = affineSkew(image, mask, tilt, phi, transformedImage, transformedMask);

            detectAndComputeImpl(transformedImage, transformedMask, keypoints[paramsIndex], descriptors[paramsIndex]);

            // map keypoint coordinates back to the original image frame
            // using the inverse affine transform Ainv (2x3, CV_32FC1)
            CV_Assert(Ainv.type() == CV_32FC1);
            const float* Ainv_ptr = Ainv.ptr<const float>();
            for(size_t kpIndex = 0; kpIndex < keypoints[paramsIndex].size(); kpIndex++)
            {
                KeyPoint& kp = keypoints[paramsIndex][kpIndex];
                float tx = Ainv_ptr[0] * kp.pt.x + Ainv_ptr[1] * kp.pt.y + Ainv_ptr[2];
                float ty = Ainv_ptr[3] * kp.pt.x + Ainv_ptr[4] * kp.pt.y + Ainv_ptr[5];
                kp.pt.x = tx;
                kp.pt.y = ty;
            }
        }
    }

    // merge keypoints and descriptors from all simulated views into the outputs
    _keypoints.clear();
    Mat allDescriptors;
    for(size_t paramsIndex = 0; paramsIndex < affineTransformParams.size(); paramsIndex++)
    {
        _keypoints.insert(_keypoints.end(), keypoints[paramsIndex].begin(), keypoints[paramsIndex].end());
        allDescriptors.push_back(descriptors[paramsIndex]);
    }

    _descriptors.create(allDescriptors.size(), allDescriptors.type());
    Mat _descriptorsMat = _descriptors.getMat();
    allDescriptors.copyTo(_descriptorsMat);
}
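
The operator above assumes affineTransformParams was filled with (tilt, phi) pairs beforehand; that setup code is not shown. A minimal sketch of how such a grid could be built, mirroring the tilt/angle schedule used in the second snippet below (buildAffineTransformParams is a hypothetical helper, not part of the original code):

#include <cmath>
#include <vector>
#include <opencv2/core.hpp>

// Hypothetical helper: builds the (tilt, phi) sampling grid for the affine simulations.
// Tilts t = 2^(k/2) for k = 1..5; rotation angles sampled every 72/t degrees.
static std::vector<cv::Vec2f> buildAffineTransformParams()
{
    std::vector<cv::Vec2f> params;
    params.push_back(cv::Vec2f(1.f, 0.f)); // tilt == 1: the original, unskewed image
    for(int k = 1; k < 6; k++)
    {
        const float tilt = std::pow(2.f, 0.5f * k);
        for(float phi = 0.f; phi < 180.f; phi += 72.f / tilt)
            params.push_back(cv::Vec2f(tilt, phi));
    }
    return params;
}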
void ASiftDetector::detectAndCompute(const Mat& img, std::vector< KeyPoint >& keypoints, Mat& descriptors)
{
    keypoints.clear();
    descriptors = Mat(0, 128, CV_32F); // SIFT descriptors are 128-dimensional
    for(int tl = 1; tl < 6; tl++)
    {
        double t = pow(2, 0.5*tl); // simulated tilts t = sqrt(2)^tl
        // sample rotation angles every 72/t degrees; phi is a double so the
        // fractional step is not silently truncated to an int
        for(double phi = 0; phi < 180; phi += 72.0/t)
        {
            std::vector<KeyPoint> kps;
            Mat desc;

            Mat timg, mask, Ai;
            img.copyTo(timg);

            // warp the image to the simulated view; affineSkew also fills the mask
            // of valid pixels and the affine map Ai used below to transform the
            // keypoints back to the original image frame
            affineSkew(t, phi, timg, mask, Ai);

#if 0       // optional: visualize the skewed image and its mask
            Mat img_disp;
            bitwise_and(mask, timg, img_disp);
            namedWindow( "Skew", WINDOW_AUTOSIZE ); // create a window for display
            imshow( "Skew", img_disp );
            waitKey(0);
#endif

            // OpenCV 2.x API; with OpenCV 3.x/4.x use cv::SIFT::create()
            // (or cv::xfeatures2d::SIFT::create() with the contrib modules) instead
            SiftFeatureDetector detector;
            detector.detect(timg, kps, mask);

            SiftDescriptorExtractor extractor;
            extractor.compute(timg, kps, desc);

            // map keypoints from the skewed image back to original image coordinates
            for(unsigned int i = 0; i < kps.size(); i++)
            {
                Point3f kpt(kps[i].pt.x, kps[i].pt.y, 1);
                Mat kpt_t = Ai*Mat(kpt); // Ai maps skewed-image coordinates to the original frame
                kps[i].pt.x = kpt_t.at<float>(0,0);
                kps[i].pt.y = kpt_t.at<float>(1,0);
            }
            keypoints.insert(keypoints.end(), kps.begin(), kps.end());
            descriptors.push_back(desc);
        }
    }
}
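
Neither snippet includes affineSkew, the helper that actually simulates an affine view. Below is a minimal sketch matching the second snippet's call (affineSkew(t, phi, img, mask, Ai)), written as a free function and following the standard ASIFT view simulation: rotate by phi, anti-alias blur along x, compress x by the tilt factor, and return the inverse affine map. Treat it as an assumption about the missing helper, not the original author's code; the first snippet's affineSkew has a different signature (it warps a separate output image and mask and returns the inverse transform) but would do essentially the same work.

#include <cmath>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

using namespace cv;

// Warp img (in place) to the view simulated by (tilt, phi), build a mask of
// valid pixels, and return in Ai the affine map from the warped image back
// to the original image coordinates.
static void affineSkew(double tilt, double phi, Mat& img, Mat& mask, Mat& Ai)
{
    int h = img.rows;
    int w = img.cols;

    mask = Mat(h, w, CV_8UC1, Scalar(255));
    Mat A = Mat::eye(2, 3, CV_32F);

    if(phi != 0.0)
    {
        phi *= CV_PI / 180.0;
        double s = sin(phi), c = cos(phi);
        A = (Mat_<float>(2, 2) << c, -s, s, c);

        // rotate the image corners to find the bounding box of the rotated image
        Mat corners = (Mat_<float>(4, 2) << 0, 0, w, 0, w, h, 0, h);
        Mat tcorners = corners * A.t();
        Rect rect = boundingRect(tcorners.reshape(2, 4));

        A = (Mat_<float>(2, 3) << c, -s, -rect.x, s, c, -rect.y);
        warpAffine(img, img, A, Size(rect.width, rect.height), INTER_LINEAR, BORDER_REPLICATE);
    }
    if(tilt != 1.0)
    {
        // anti-alias along x before subsampling, then compress x by 1/tilt
        double s = 0.8 * sqrt(tilt * tilt - 1);
        GaussianBlur(img, img, Size(0, 0), s, 0.01);
        resize(img, img, Size(0, 0), 1.0 / tilt, 1.0, INTER_NEAREST);
        A.row(0) = A.row(0) / tilt;
    }
    if(phi != 0.0 || tilt != 1.0)
        warpAffine(mask, mask, A, img.size(), INTER_NEAREST);

    invertAffineTransform(A, Ai); // Ai: warped coordinates -> original coordinates
}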