Example #1
void cv::superres::Farneback::calc(InputArray _frame0, InputArray _frame1, OutputArray _flow1, OutputArray _flow2)
{
    Mat frame0 = ::getMat(_frame0, buf[0]);
    Mat frame1 = ::getMat(_frame1, buf[1]);

    CV_DbgAssert( frame1.type() == frame0.type() );
    CV_DbgAssert( frame1.size() == frame0.size() );

    Mat input0 = ::convertToType(frame0, CV_8U, 1, buf[2], buf[3]);
    Mat input1 = ::convertToType(frame1, CV_8U, 1, buf[4], buf[5]);

    if (!_flow2.needed() && _flow1.kind() != _InputArray::GPU_MAT)
    {
        call(input0, input1, _flow1);
        return;
    }

    call(input0, input1, flow);

    if (!_flow2.needed())
    {
        ::copy(_flow1, flow);
    }
    else
    {
        split(flow, flows);

        ::copy(_flow1, flows[0]);
        ::copy(_flow2, flows[1]);
    }
}
Example #2
void cv::superres::Farneback_GPU::calc(InputArray _frame0, InputArray _frame1, OutputArray _flow1, OutputArray _flow2)
{
    GpuMat frame0 = ::getGpuMat(_frame0, buf[0]);
    GpuMat frame1 = ::getGpuMat(_frame1, buf[1]);

    CV_DbgAssert( frame1.type() == frame0.type() );
    CV_DbgAssert( frame1.size() == frame0.size() );

    GpuMat input0 = ::convertToType(frame0, CV_8U, 1, buf[2], buf[3]);
    GpuMat input1 = ::convertToType(frame1, CV_8U, 1, buf[4], buf[5]);

    if (_flow2.needed() && _flow1.kind() == _InputArray::GPU_MAT && _flow2.kind() == _InputArray::GPU_MAT)
    {
        call(input0, input1, _flow1.getGpuMatRef(), _flow2.getGpuMatRef());
        return;
    }

    call(input0, input1, u, v);

    if (_flow2.needed())
    {
        ::copy(_flow1, u);
        ::copy(_flow2, v);
    }
    else
    {
        GpuMat src[] = {u, v};
        gpu::merge(src, 2, flow);
        ::copy(_flow1, flow);
    }
}
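A minimal calling sketch for the two wrappers above; the opticalFlow handle and frame names are hypothetical. Passing cv::noArray() for the second output makes _flow2.needed() return false, so the split/merge work is skipped entirely.

// Hedged sketch: opticalFlow is a hypothetical handle to one of the wrappers above.
cv::Mat flowXY;
opticalFlow->calc(frame0, frame1, flowXY, cv::noArray()); // _flow2.needed() == false
cv::Mat flowX, flowY;
opticalFlow->calc(frame0, frame1, flowX, flowY);          // both planes requested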
Example #3
    Vec2d predict2(InputArray _sample, OutputArray _probs) const
    {
        int ptype = CV_64F;
        Mat sample = _sample.getMat();
        CV_Assert(isTrained());

        CV_Assert(!sample.empty());
        if(sample.type() != CV_64FC1)
        {
            Mat tmp;
            sample.convertTo(tmp, CV_64FC1);
            sample = tmp;
        }
        sample = sample.reshape(1, 1);

        Mat probs;
        if( _probs.needed() )
        {
            if( _probs.fixedType() )
                ptype = _probs.type();
            _probs.create(1, nclusters, ptype);
            probs = _probs.getMat();
        }

        return computeProbabilities(sample, !probs.empty() ? &probs : 0, ptype);
    }
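A hedged usage sketch for predict2(); the em model, sample, and nclusters names are assumptions. Because the method honours _probs.fixedType(), a fixed-type output such as cv::Mat_<float> yields CV_32F probabilities instead of the CV_64F default.

cv::Mat_<float> probs;                        // fixed type, so ptype becomes CV_32F
cv::Vec2d res = em->predict2(sample, probs);  // res[0]: log-likelihood, res[1]: component index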
Example #4
float SVMSGDImpl::predict( InputArray _samples, OutputArray _results, int ) const
{
    float result = 0;
    cv::Mat samples = _samples.getMat();
    int nSamples = samples.rows;
    cv::Mat results;

    CV_Assert( samples.cols == weights_.cols && samples.type() == CV_32FC1);

    if( _results.needed() )
    {
        _results.create( nSamples, 1, samples.type() );
        results = _results.getMat();
    }
    else
    {
        CV_Assert( nSamples == 1 );
        results = Mat(1, 1, CV_32FC1, &result);
    }

    for (int sampleIndex = 0; sampleIndex < nSamples; sampleIndex++)
    {
        Mat currentSample = samples.row(sampleIndex);
        float criterion = static_cast<float>(currentSample.dot(weights_)) + shift_;
        results.at<float>(sampleIndex) = (criterion >= 0) ? 1.f : -1.f;
    }

    return result;
}
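A hedged sketch of the calling contract shown above; svmsgd and the matrices are assumptions. With no output array the method accepts only a single row and returns its label; with one, it writes one +/-1 label per row.

float label = svmsgd->predict(oneRow);  // nSamples == 1 is asserted
cv::Mat labels;
svmsgd->predict(samples, labels);       // labels gets one value per input row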
Example #5
float AffineTransformerImpl::applyTransformation(InputArray inPts, OutputArray outPts)
{
    Mat pts1 = inPts.getMat();
    CV_Assert((pts1.channels()==2) && (pts1.cols>0));

    //Apply transformation in the complete set of points
    Mat fAffine;
    transform(pts1, fAffine, affineMat);

    // Assembling output //
    if (outPts.needed())
    {
        outPts.create(1,fAffine.cols, CV_32FC2);
        Mat outMat = outPts.getMat();
        for (int i=0; i<fAffine.cols; i++)
            outMat.at<Point2f>(0,i)=fAffine.at<Point2f>(0,i);
    }

    // Updating Transform Cost //
    Mat Af(2, 2, CV_32F);
    Af.at<float>(0,0)=affineMat.at<float>(0,0);
    Af.at<float>(0,1)=affineMat.at<float>(1,0);
    Af.at<float>(1,0)=affineMat.at<float>(0,1);
    Af.at<float>(1,1)=affineMat.at<float>(1,1);
    SVD mysvd(Af, SVD::NO_UV);
    Mat singVals=mysvd.w;
    transformCost=std::log((singVals.at<float>(0,0)+FLT_MIN)/(singVals.at<float>(1,0)+FLT_MIN));

    return transformCost;
}
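A hedged sketch against the public shape API; the point sets and match list are assumptions. applyTransformation() fills its output only because a real array is passed.

cv::Ptr<cv::AffineTransformer> aff = cv::createAffineTransformer(false);
aff->estimateTransformation(srcPts, dstPts, matches);  // matches: std::vector<cv::DMatch>
cv::Mat warped;
float cost = aff->applyTransformation(srcPts, warped); // warped is 1 x N, CV_32FC2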
Example #6
    float predict(InputArray _inputs, OutputArray _outputs, int) const
    {
        bool needprobs = _outputs.needed();
        Mat samples = _inputs.getMat(), probs, probsrow;
        int ptype = CV_64F;
        float firstres = 0.f;
        int i, nsamples = samples.rows;

        if( needprobs )
        {
            if( _outputs.fixedType() )
                ptype = _outputs.type();
            _outputs.create(samples.rows, nclusters, ptype);
            probs = _outputs.getMat(); // required: probs.row(i) below reads from this matrix
        }
        else
            nsamples = std::min(nsamples, 1);

        for( i = 0; i < nsamples; i++ )
        {
            if( needprobs )
                probsrow = probs.row(i);
            Vec2d res = computeProbabilities(samples.row(i), needprobs ? &probsrow : 0, ptype);
            if( i == 0 )
                firstres = (float)res[1];
        }
        return firstres;
    }
Example #7
double computeReprojectionErrors(InputArray points3D,
                                 InputArray points2D,
                                 InputArray cameraMatrix,
                                 InputArray distCoeffs,
                                 InputArray rvec,
                                 InputArray tvec,
                                 OutputArray _proj_points2D,
                                 vector<double> *individual_error)
{
  // set proper type for the output
  Mat x = points2D.getMat();
  Mat proj_points2D = _proj_points2D.getMat();
  proj_points2D.create(x.rows, x.cols, x.type());

  // project points
  projectPoints(points3D, rvec, tvec, cameraMatrix, distCoeffs, proj_points2D);

  // save output if it is needed (no default parameter)
  if (_proj_points2D.needed())
  {
    proj_points2D.copyTo(_proj_points2D);
  }

  // return error
  return calib::norm(x, proj_points2D, individual_error);
}
Example #8
float StatModel::calcError( const Ptr<TrainData>& data, bool testerr, OutputArray _resp ) const
{
    CV_TRACE_FUNCTION_SKIP_NESTED();
    Mat samples = data->getSamples();
    int layout = data->getLayout();
    Mat sidx = testerr ? data->getTestSampleIdx() : data->getTrainSampleIdx();
    const int* sidx_ptr = sidx.ptr<int>();
    int i, n = (int)sidx.total();
    bool isclassifier = isClassifier();
    Mat responses = data->getResponses();
    int responses_type = responses.type();

    if( n == 0 )
        n = data->getNSamples();

    if( n == 0 )
        return -FLT_MAX;

    Mat resp;
    if( _resp.needed() )
        resp.create(n, 1, CV_32F);

    double err = 0;
    for( i = 0; i < n; i++ )
    {
        int si = sidx_ptr ? sidx_ptr[i] : i;
        Mat sample = layout == ROW_SAMPLE ? samples.row(si) : samples.col(si);
        float val = predict(sample);
        float val0 = (responses_type == CV_32S) ? (float)responses.at<int>(si) : responses.at<float>(si);

        if( isclassifier )
            err += fabs(val - val0) > FLT_EPSILON;
        else
            err += (val - val0)*(val - val0);
        if( !resp.empty() )
            resp.at<float>(i) = val;
        /*if( i < 100 )
        {
            printf("%d. ref %.1f vs pred %.1f\n", i, val0, val);
        }*/
    }

    if( _resp.needed() )
        resp.copyTo(_resp);

    return (float)(err / n * (isclassifier ? 100 : 1));
}
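A hedged calling sketch; the model and TrainData names are assumptions. Per-sample predictions are gathered only when the caller asks for them, so cv::noArray() skips that allocation.

float trainErr = model->calcError(tdata, false, cv::noArray()); // error only
cv::Mat perSample;
float testErr = model->calcError(tdata, true, perSample);       // error plus responses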
Example #9
float cv::EMD( InputArray _signature1, InputArray _signature2,
               int distType, InputArray _cost,
               float* lowerBound, OutputArray _flow )
{
    Mat signature1 = _signature1.getMat(), signature2 = _signature2.getMat();
    Mat cost = _cost.getMat(), flow;

    CvMat _csignature1 = signature1;
    CvMat _csignature2 = signature2;
    CvMat _ccost = cost, _cflow;
    if( _flow.needed() )
    {
        _flow.create(signature1.rows, signature2.rows, CV_32F);
        flow = _flow.getMat();
        _cflow = flow;
    }

    return cvCalcEMD2( &_csignature1, &_csignature2, distType, 0, cost.empty() ? 0 : &_ccost,
                       _flow.needed() ? &_cflow : 0, lowerBound, 0 );
}
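A hedged sketch, assuming a 3.x-style build where cv::DIST_L2 is available; the signature matrices are assumptions. The flow matrix is allocated and filled only when a real output is passed.

float d1 = cv::EMD(sig1, sig2, cv::DIST_L2);                         // distance only
cv::Mat flow;
float d2 = cv::EMD(sig1, sig2, cv::DIST_L2, cv::noArray(), 0, flow); // with flow matrix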
Example #10
        void detectAndCompute(InputArray image, InputArray mask,
                              std::vector<KeyPoint>& keypoints,
                              OutputArray descriptors,
                              bool useProvidedKeypoints)
        {
            Mat img = image.getMat();

            if (img_width != img.cols) {
                img_width = img.cols;
                impl.release();
            }

            if (img_height != img.rows) {
                img_height = img.rows;
                impl.release();
            }

            if (impl.empty()) {
                AKAZEOptionsV2 options;
                options.descriptor = descriptor;
                options.descriptor_channels = descriptor_channels;
                options.descriptor_size = descriptor_size;
                options.img_width = img_width;
                options.img_height = img_height;
                options.dthreshold = threshold;
                options.omax = octaves;
                options.nsublevels = sublevels;
                options.diffusivity = diffusivity;

                impl = makePtr<AKAZEFeaturesV2>(options);
            }

            impl->Create_Nonlinear_Scale_Space(img);

            if (!useProvidedKeypoints)
            {
                impl->Feature_Detection(keypoints);
            }

            if (!mask.empty())
            {
                KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
            }

            if( descriptors.needed() )
            {
                Mat& desc = descriptors.getMatRef();
                impl->Compute_Descriptors(keypoints, desc);

                CV_Assert((!desc.rows || desc.cols == descriptorSize()));
                CV_Assert((!desc.rows || (desc.type() == descriptorType())));
            }
        }
Example #11
        void detectAndCompute(InputArray image, InputArray mask,
                              std::vector<KeyPoint>& keypoints,
                              OutputArray descriptors,
                              bool useProvidedKeypoints)
        {
            Mat img = image.getMat();
            if (img.type() != CV_8UC1 && img.type() != CV_16UC1)
                cvtColor(image, img, COLOR_BGR2GRAY);

            Mat img1_32;
            if ( img.depth() == CV_32F )
                img1_32 = img;
            else if ( img.depth() == CV_8U )
                img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
            else if ( img.depth() == CV_16U )
                img.convertTo(img1_32, CV_32F, 1.0 / 65535.0, 0);

            CV_Assert( ! img1_32.empty() );

            AKAZEOptions options;
            options.descriptor = descriptor;
            options.descriptor_channels = descriptor_channels;
            options.descriptor_size = descriptor_size;
            options.img_width = img.cols;
            options.img_height = img.rows;
            options.dthreshold = threshold;
            options.omax = octaves;
            options.nsublevels = sublevels;
            options.diffusivity = diffusivity;

            AKAZEFeatures impl(options);
            impl.Create_Nonlinear_Scale_Space(img1_32);

            if (!useProvidedKeypoints)
            {
                impl.Feature_Detection(keypoints);
            }

            if (!mask.empty())
            {
                KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
            }

            if( descriptors.needed() )
            {
                Mat& desc = descriptors.getMatRef();
                impl.Compute_Descriptors(keypoints, desc);

                CV_Assert((!desc.rows || desc.cols == descriptorSize()));
                CV_Assert((!desc.rows || (desc.type() == descriptorType())));
            }
        }
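A hedged sketch against the public AKAZE factory; the input image is an assumption. Passing cv::noArray() for descriptors makes descriptors.needed() false, so the call reduces to detection.

cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();
std::vector<cv::KeyPoint> kps;
akaze->detectAndCompute(gray, cv::noArray(), kps, cv::noArray()); // detect only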
Example #12
void cv::gpu::GeneralizedHough_GPU::download(const GpuMat& d_positions, OutputArray h_positions_, OutputArray h_votes_)
{
    if (d_positions.empty())
    {
        h_positions_.release();
        if (h_votes_.needed())
            h_votes_.release();
        return;
    }

    CV_Assert(d_positions.rows == 2 && d_positions.type() == CV_32FC4);

    h_positions_.create(1, d_positions.cols, CV_32FC4);
    Mat h_positions = h_positions_.getMat();
    d_positions.row(0).download(h_positions);

    if (h_votes_.needed())
    {
        h_votes_.create(1, d_positions.cols, CV_32SC3);
        Mat h_votes = h_votes_.getMat();
        GpuMat d_votes(1, d_positions.cols, CV_32SC3, const_cast<int3*>(d_positions.ptr<int3>(1)));
        d_votes.download(h_votes);
    }
}
Example #13
bool CustomPattern::findPattern(InputArray image, OutputArray matched_features, OutputArray pattern_points,
                                const double ratio, const double proj_error, const bool refine_position, OutputArray out,
                                OutputArray H, OutputArray pattern_corners)
{
    CV_Assert(!image.empty() && proj_error > 0);

    Mat img = image.getMat();
    vector<Point2f> m_ftrs;
    vector<Point3f> pattern_pts;
    Mat _H;
    vector<Point2f> scene_corners;
    if (!findPatternPass(img, m_ftrs, pattern_pts, _H, scene_corners, 0.6, proj_error, refine_position))
        return false; // pattern not found

    Mat mask = Mat::zeros(img.size(), CV_8UC1);
    vector<vector<Point> > obj(1);
    vector<Point> scorners_int(scene_corners.size());
    for (uint i = 0; i < scene_corners.size(); ++i)
        scorners_int[i] = (Point)scene_corners[i]; // for drawContours
    obj[0] = scorners_int;
    drawContours(mask, obj, 0, Scalar(255), FILLED);

    // Second pass
    Mat output;
    if (!findPatternPass(img, m_ftrs, pattern_pts, _H, scene_corners,
                         ratio, proj_error, refine_position, mask, output))
        return false; // pattern not found

    Mat(m_ftrs).copyTo(matched_features);
    Mat(pattern_pts).copyTo(pattern_points);
    if (out.needed()) output.copyTo(out);
    if (H.needed()) _H.copyTo(H);
    if (pattern_corners.needed()) Mat(scene_corners).copyTo(pattern_corners);

    return (!m_ftrs.empty());
}
Example #14
bool CustomPattern::init(Mat& image, const float pixel_size, OutputArray output)
{
    image.copyTo(img_roi);
    //Setup object corners
    obj_corners = std::vector<Point2f>(4);
    obj_corners[0] = Point2f(0, 0); obj_corners[1] = Point2f(img_roi.cols, 0);
    obj_corners[2] = Point2f(img_roi.cols, img_roi.rows); obj_corners[3] = Point2f(0, img_roi.rows);

    if (!detector)   // if no detector chosen, use default
    {
        detector = FeatureDetector::create("ORB");
        detector->set("nFeatures", 2000);
        detector->set("scaleFactor", 1.15);
        detector->set("nLevels", 30);
    }

    detector->detect(img_roi, keypoints);
    if (keypoints.empty())
    {
        initialized = false;
        return initialized;
    }
    refineKeypointsPos(img_roi, keypoints);

    if (!descriptorExtractor)   // if no extractor chosen, use default
        descriptorExtractor = DescriptorExtractor::create("ORB");
    descriptorExtractor->compute(img_roi, keypoints, descriptor);

    if (!descriptorMatcher)
        descriptorMatcher = DescriptorMatcher::create("BruteForce-Hamming(2)");

    // Scale found points by pixelSize
    pxSize = pixel_size;
    scaleFoundPoints(pxSize, keypoints, points3d);

    if (output.needed())
    {
        Mat out;
        drawKeypoints(img_roi, keypoints, out, CV_RGB(255, 0, 0));
        out.copyTo(output);
    }

    initialized = !keypoints.empty();
    return initialized; // initialized if any keypoints are found
}
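A hedged sketch of the public entry point that wraps init(), assuming opencv_contrib's CustomPattern::create(pattern, boardSize, output) signature; all names are assumptions. The annotated keypoint image is drawn only when the caller requests it.

cv::ccalib::CustomPattern pattern;
cv::Mat annotated;  // pass cv::noArray() instead to skip the drawKeypoints call
bool ok = pattern.create(patternImg, cv::Size2f(90.f, 60.f), annotated);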
Example #15
void SparsePyrLkOptFlowEstimatorGpu::run(
        InputArray frame0, InputArray frame1, InputArray points0, InputOutputArray points1,
        OutputArray status, OutputArray errors)
{
    frame0_.upload(frame0.getMat());
    frame1_.upload(frame1.getMat());
    points0_.upload(points0.getMat());

    if (errors.needed())
    {
        run(frame0_, frame1_, points0_, points1_, status_, errors_);
        errors_.download(errors.getMatRef());
    }
    else
        run(frame0_, frame1_, points0_, points1_, status_);

    points1_.download(points1.getMatRef());
    status_.download(status.getMatRef());
}
Example #16
void DensePyrLkOptFlowEstimatorGpu::run(
        InputArray frame0, InputArray frame1, InputOutputArray flowX, InputOutputArray flowY,
        OutputArray errors)
{
    frame0_.upload(frame0.getMat());
    frame1_.upload(frame1.getMat());

    optFlowEstimator_.winSize = winSize_;
    optFlowEstimator_.maxLevel = maxLevel_;
    if (errors.needed())
    {
        optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_, &errors_);
        errors_.download(errors.getMatRef());
    }
    else
        optFlowEstimator_.dense(frame0_, frame1_, flowX_, flowY_);

    flowX_.download(flowX.getMatRef());
    flowY_.download(flowY.getMatRef());
}
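A hedged sketch reusing the signatures above; the frames and point matrices are assumptions. Omitting the error output avoids one extra device-to-host download.

SparsePyrLkOptFlowEstimatorGpu estimator;
cv::Mat status, pts1;
estimator.run(frame0, frame1, pts0, pts1, status, cv::noArray()); // no error download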
Example #17
cv::Mat cv::findHomography( InputArray _points1, InputArray _points2,
                            int method, double ransacReprojThreshold, OutputArray _mask )
{
    Mat points1 = _points1.getMat(), points2 = _points2.getMat();
    int npoints = points1.checkVector(2);
    CV_Assert( npoints >= 0 && points2.checkVector(2) == npoints &&
               points1.type() == points2.type());
    
    Mat H(3, 3, CV_64F);
    CvMat _pt1 = points1, _pt2 = points2;
    CvMat matH = H, c_mask, *p_mask = 0;
    if( _mask.needed() )
    {
        _mask.create(npoints, 1, CV_8U, -1, true);
        p_mask = &(c_mask = _mask.getMat());
    }
    bool ok = cvFindHomography( &_pt1, &_pt2, &matH, method, ransacReprojThreshold, p_mask ) > 0;
    if( !ok )
        H = Scalar(0);
    return H;
}
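A hedged sketch; the point vectors are assumptions. The inlier mask costs an extra allocation and copy, so request it only when it will be used.

cv::Mat H1 = cv::findHomography(pts1, pts2, cv::RANSAC, 3.0);       // no mask
cv::Mat mask;
cv::Mat H2 = cv::findHomography(pts1, pts2, cv::RANSAC, 3.0, mask); // with inlier mask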
Example #18
float ThinPlateSplineShapeTransformerImpl::applyTransformation(InputArray inPts, OutputArray outPts)
{
    CV_Assert(tpsComputed);
    Mat pts1 = inPts.getMat();
    CV_Assert((pts1.channels()==2) && (pts1.cols>0));

    //Apply transformation in the complete set of points
    // Assembling output //
    if (outPts.needed())
    {
        outPts.create(1,pts1.cols, CV_32FC2);
        Mat outMat = outPts.getMat();
        for (int i=0; i<pts1.cols; i++)
        {
            Point2f pt=pts1.at<Point2f>(0,i);
            outMat.at<Point2f>(0,i)=_applyTransformation(shapeReference, pt, tpsParameters);
        }
    }

    return transformCost;
}
Example #19
//static
void QualityBRISQUE::computeFeatures(InputArray img, OutputArray features)
{
    CV_Assert(features.needed());
    CV_Assert(img.isMat());
    CV_Assert(!img.getMat().empty());

    auto mat = mat_convert(img.getMat());

    const auto vals = ComputeBrisqueFeature(mat);
    cv::Mat valmat( cv::Size( (int)vals.size(), 1 ), CV_32FC1, (void*)vals.data()); // create row vector, type depends on brisque_calc_element_type

    if (features.isUMat())
        valmat.copyTo(features.getUMatRef());
    else if (features.isMat())
        // how to move data instead?
        // if calling this:
        //      features.getMatRef() = valmat;
        //  then shared data is erased when valmat is released, corrupting the data in the outputarray for the caller
        valmat.copyTo(features.getMatRef());
    else
        CV_Error(cv::Error::StsNotImplemented, "Unsupported output type");
}
Example #20
// gliese581h suggested filling a cv::Mat with descriptors to enable BFmatcher compatibility
// speed-ups and enhancements by gliese581h
void LUCIDImpl::compute(InputArray _src, std::vector<KeyPoint> &keypoints, OutputArray _desc) {
    cv::Mat src_input = _src.getMat();
    if (src_input.empty())
        return;
    CV_Assert(src_input.depth() == CV_8U && src_input.channels() == 3);

    Mat_<Vec3b> src;

    blur(src_input, src, cv::Size(b_kernel, b_kernel));

    int x, y, j, d, p, m = (l_kernel*2+1)*(l_kernel*2+1)*3, width = src.cols, height = src.rows, r, c;

    Mat_<uchar> desc(static_cast<int>(keypoints.size()), m);

    for (std::size_t i = 0; i < keypoints.size(); ++i) {
        x = static_cast<int>(keypoints[i].pt.x)-l_kernel, y = static_cast<int>(keypoints[i].pt.y)-l_kernel, d = x+2*l_kernel, p = y+2*l_kernel, j = x, r = static_cast<int>(i), c = 0;

        while (x <= d) {
            Vec3b &pix = src((y < 0 ? height+y : y >= height ? y-height : y), (x < 0 ? width+x : x >= width ? x-width : x));

            desc(r, c++) = pix[0];
            desc(r, c++) = pix[1];
            desc(r, c++) = pix[2];

            ++x;
            if (x > d) {
                if (y < p) {
                    ++y;
                    x = j;
                }
                else
                    break;
            }
        }
    }

    if (_desc.needed())
        sort(desc, _desc, SORT_EVERY_ROW | SORT_ASCENDING);
}
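A hedged sketch, assuming the xfeatures2d LUCID factory; the image and keypoints are assumptions. A real cv::Mat output guarantees _desc.needed() holds, so the sorted descriptors are written out.

cv::Ptr<cv::xfeatures2d::LUCID> lucid = cv::xfeatures2d::LUCID::create(1, 2);
cv::Mat desc;
lucid->compute(bgrImage, keypoints, desc); // input must be 3-channel 8-bit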
Example #21
cv::Mat cv::findFundamentalMat( InputArray _points1, InputArray _points2,
                               int method, double param1, double param2,
                               OutputArray _mask )
{
    Mat points1 = _points1.getMat(), points2 = _points2.getMat();
    int npoints = points1.checkVector(2);
    CV_Assert( npoints >= 0 && points2.checkVector(2) == npoints &&
              points1.type() == points2.type());
    
    Mat F(3, 3, CV_64F);
    CvMat _pt1 = points1, _pt2 = points2;
    CvMat matF = F, c_mask, *p_mask = 0;
    if( _mask.needed() )
    {
        _mask.create(npoints, 1, CV_8U, -1, true);
        p_mask = &(c_mask = _mask.getMat());
    }
    int n = cvFindFundamentalMat( &_pt1, &_pt2, &matF, method, param1, param2, p_mask );
    if( n <= 0 )
        F = Scalar(0);
    return F;
}
Example #22
    float predict( InputArray _inputs, OutputArray _outputs, int ) const
    {
        if( !trained )
            CV_Error( CV_StsError, "The network has not been trained or loaded" );

        Mat inputs = _inputs.getMat();
        int type = inputs.type(), l_count = layer_count();
        int n = inputs.rows, dn0 = n;

        CV_Assert( (type == CV_32F || type == CV_64F) && inputs.cols == layer_sizes[0] );
        int noutputs = layer_sizes[l_count-1];
        Mat outputs;

        int min_buf_sz = 2*max_lsize;
        int buf_sz = n*min_buf_sz;

        if( buf_sz > max_buf_sz )
        {
            dn0 = max_buf_sz/min_buf_sz;
            dn0 = std::max( dn0, 1 );
            buf_sz = dn0*min_buf_sz;
        }

        cv::AutoBuffer<double> _buf(buf_sz+noutputs);
        double* buf = _buf;

        if( !_outputs.needed() )
        {
            CV_Assert( n == 1 );
            outputs = Mat(n, noutputs, type, buf + buf_sz);
        }
        else
        {
            _outputs.create(n, noutputs, type);
            outputs = _outputs.getMat();
        }

        int dn = 0;
        for( int i = 0; i < n; i += dn )
        {
            dn = std::min( dn0, n - i );

            Mat layer_in = inputs.rowRange(i, i + dn);
            Mat layer_out( dn, layer_in.cols, CV_64F, buf);

            scale_input( layer_in, layer_out );
            layer_in = layer_out;

            for( int j = 1; j < l_count; j++ )
            {
                double* data = buf + ((j&1) ? max_lsize*dn0 : 0);
                int cols = layer_sizes[j];

                layer_out = Mat(dn, cols, CV_64F, data);
                Mat w = weights[j].rowRange(0, layer_in.cols);
                gemm(layer_in, w, 1, noArray(), 0, layer_out);
                calc_activ_func( layer_out, weights[j] );

                layer_in = layer_out;
            }

            layer_out = outputs.rowRange(i, i + dn);
            scale_output( layer_in, layer_out );
        }

        if( n == 1 )
        {
            int maxIdx[] = {0, 0};
            minMaxIdx(outputs, 0, 0, 0, maxIdx);
            return (float)(maxIdx[0] + maxIdx[1]);
        }

        return 0.f;
    }
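A hedged sketch of the contract above; the network and samples are assumptions. Without an output array the code asserts on a single row and returns the index of the strongest response; with one, every row is propagated.

float best = mlp->predict(oneRow, cv::noArray()); // index of the winning output neuron
cv::Mat responses;
mlp->predict(samples, responses);                 // raw per-neuron outputs, one row each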
Example #23
    int recoverPose( InputArray E, InputArray _points1, InputArray _points2, InputArray _cameraMatrix,
                         OutputArray _R, OutputArray _t, InputOutputArray _mask)
    {

        Mat points1, points2, cameraMatrix;
        _points1.getMat().convertTo(points1, CV_64F);
        _points2.getMat().convertTo(points2, CV_64F);
        _cameraMatrix.getMat().convertTo(cameraMatrix, CV_64F);

        int npoints = points1.checkVector(2);
        CV_Assert( npoints >= 0 && points2.checkVector(2) == npoints &&
                                  points1.type() == points2.type());

        CV_Assert(cameraMatrix.rows == 3 && cameraMatrix.cols == 3 && cameraMatrix.channels() == 1);

        if (points1.channels() > 1)
        {
            points1 = points1.reshape(1, npoints);
            points2 = points2.reshape(1, npoints);
        }

        double fx = cameraMatrix.at<double>(0,0);
        double fy = cameraMatrix.at<double>(1,1);
        double cx = cameraMatrix.at<double>(0,2);
        double cy = cameraMatrix.at<double>(1,2);

        points1.col(0) = (points1.col(0) - cx) / fx;
        points2.col(0) = (points2.col(0) - cx) / fx;
        points1.col(1) = (points1.col(1) - cy) / fy;
        points2.col(1) = (points2.col(1) - cy) / fy;

        points1 = points1.t();
        points2 = points2.t();

        Mat R1, R2, t;
        decomposeEssentialMat(E, R1, R2, t);
        Mat P0 = Mat::eye(3, 4, R1.type());
        Mat P1(3, 4, R1.type()), P2(3, 4, R1.type()), P3(3, 4, R1.type()), P4(3, 4, R1.type());
        P1(Range::all(), Range(0, 3)) = R1 * 1.0; P1.col(3) = t * 1.0;
        P2(Range::all(), Range(0, 3)) = R2 * 1.0; P2.col(3) = t * 1.0;
        P3(Range::all(), Range(0, 3)) = R1 * 1.0; P3.col(3) = -t * 1.0;
        P4(Range::all(), Range(0, 3)) = R2 * 1.0; P4.col(3) = -t * 1.0;

        // Do the cheirality check.
        // Notice here a threshold dist is used to filter
        // out far away points (i.e. infinite points) since
        // their depth may vary between positive and negative.
        double dist = 50.0;
        Mat Q;
        triangulatePoints(P0, P1, points1, points2, Q);
        Mat mask1 = Q.row(2).mul(Q.row(3)) > 0;
        Q.row(0) /= Q.row(3);
        Q.row(1) /= Q.row(3);
        Q.row(2) /= Q.row(3);
        Q.row(3) /= Q.row(3);
        mask1 = (Q.row(2) < dist) & mask1;
        Q = P1 * Q;
        mask1 = (Q.row(2) > 0) & mask1;
        mask1 = (Q.row(2) < dist) & mask1;

        triangulatePoints(P0, P2, points1, points2, Q);
        Mat mask2 = Q.row(2).mul(Q.row(3)) > 0;
        Q.row(0) /= Q.row(3);
        Q.row(1) /= Q.row(3);
        Q.row(2) /= Q.row(3);
        Q.row(3) /= Q.row(3);
        mask2 = (Q.row(2) < dist) & mask2;
        Q = P2 * Q;
        mask2 = (Q.row(2) > 0) & mask2;
        mask2 = (Q.row(2) < dist) & mask2;

        triangulatePoints(P0, P3, points1, points2, Q);
        Mat mask3 = Q.row(2).mul(Q.row(3)) > 0;
        Q.row(0) /= Q.row(3);
        Q.row(1) /= Q.row(3);
        Q.row(2) /= Q.row(3);
        Q.row(3) /= Q.row(3);
        mask3 = (Q.row(2) < dist) & mask3;
        Q = P3 * Q;
        mask3 = (Q.row(2) > 0) & mask3;
        mask3 = (Q.row(2) < dist) & mask3;

        triangulatePoints(P0, P4, points1, points2, Q);
        Mat mask4 = Q.row(2).mul(Q.row(3)) > 0;
        Q.row(0) /= Q.row(3);
        Q.row(1) /= Q.row(3);
        Q.row(2) /= Q.row(3);
        Q.row(3) /= Q.row(3);
        mask4 = (Q.row(2) < dist) & mask4;
        Q = P4 * Q;
        mask4 = (Q.row(2) > 0) & mask4;
        mask4 = (Q.row(2) < dist) & mask4;

        mask1 = mask1.t();
        mask2 = mask2.t();
        mask3 = mask3.t();
        mask4 = mask4.t();

        // If _mask is given, then use it to filter outliers.
        if (!_mask.empty())
        {
            Mat mask = _mask.getMat();
            CV_Assert(mask.size() == mask1.size());
            bitwise_and(mask, mask1, mask1);
            bitwise_and(mask, mask2, mask2);
            bitwise_and(mask, mask3, mask3);
            bitwise_and(mask, mask4, mask4);
        }
        if (_mask.empty() && _mask.needed())
        {
            _mask.create(mask1.size(), CV_8U);
        }

        CV_Assert(_R.needed() && _t.needed());
        _R.create(3, 3, R1.type());
        _t.create(3, 1, t.type());

        int good1 = countNonZero(mask1);
        int good2 = countNonZero(mask2);
        int good3 = countNonZero(mask3);
        int good4 = countNonZero(mask4);

        if (good1 >= good2 && good1 >= good3 && good1 >= good4)
        {
            R1.copyTo(_R);
            t.copyTo(_t);
            if (_mask.needed()) mask1.copyTo(_mask);
            return good1;
        }
        else if (good2 >= good1 && good2 >= good3 && good2 >= good4)
        {
            R2.copyTo(_R);
            t.copyTo(_t);
            if (_mask.needed()) mask2.copyTo(_mask);
            return good2;
        }
        else if (good3 >= good1 && good3 >= good2 && good3 >= good4)
        {
            t = -t;
            R1.copyTo(_R);
            t.copyTo(_t);
            if (_mask.needed()) mask3.copyTo(_mask);
            return good3;
        }
        else
        {
            t = -t;
            R2.copyTo(_R);
            t.copyTo(_t);
            if (_mask.needed()) mask4.copyTo(_mask);
            return good4;
        }
    }
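A hedged end-to-end sketch; the point sets and camera matrix K are assumptions. The RANSAC mask from findEssentialMat is passed straight through, and recoverPose narrows it in place with the cheirality test.

cv::Mat R, t, mask;
cv::Mat E = cv::findEssentialMat(pts1, pts2, K, cv::RANSAC, 0.999, 1.0, mask);
int good = cv::recoverPose(E, pts1, pts2, K, R, t, mask); // mask now marks cheirality inliers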
Example #24
bool solvePnPRansac(InputArray _opoints, InputArray _ipoints,
                        InputArray _cameraMatrix, InputArray _distCoeffs,
                        OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
                        int iterationsCount, float reprojectionError, double confidence,
                        OutputArray _inliers, int flags)
{
    CV_INSTRUMENT_REGION()

    Mat opoints0 = _opoints.getMat(), ipoints0 = _ipoints.getMat();
    Mat opoints, ipoints;
    if( opoints0.depth() == CV_64F || !opoints0.isContinuous() )
        opoints0.convertTo(opoints, CV_32F);
    else
        opoints = opoints0;
    if( ipoints0.depth() == CV_64F || !ipoints0.isContinuous() )
        ipoints0.convertTo(ipoints, CV_32F);
    else
        ipoints = ipoints0;

    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );

    CV_Assert(opoints.isContinuous());
    CV_Assert(opoints.depth() == CV_32F || opoints.depth() == CV_64F);
    CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
    CV_Assert(ipoints.isContinuous());
    CV_Assert(ipoints.depth() == CV_32F || ipoints.depth() == CV_64F);
    CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);

    _rvec.create(3, 1, CV_64FC1);
    _tvec.create(3, 1, CV_64FC1);

    Mat rvec = useExtrinsicGuess ? _rvec.getMat() : Mat(3, 1, CV_64FC1);
    Mat tvec = useExtrinsicGuess ? _tvec.getMat() : Mat(3, 1, CV_64FC1);
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();

    int model_points = 5;
    int ransac_kernel_method = SOLVEPNP_EPNP;

    if( npoints == 4 )
    {
        model_points = 4;
        ransac_kernel_method = SOLVEPNP_P3P;
    }

    Ptr<PointSetRegistrator::Callback> cb; // pointer to callback
    cb = makePtr<PnPRansacCallback>( cameraMatrix, distCoeffs, ransac_kernel_method, useExtrinsicGuess, rvec, tvec);

    double param1 = reprojectionError;                // reprojection error
    double param2 = confidence;                       // confidence
    int param3 = iterationsCount;                     // number maximum iterations

    Mat _local_model(3, 2, CV_64FC1);
    Mat _mask_local_inliers(1, opoints.rows, CV_8UC1);

    // call Ransac
    int result = createRANSACPointSetRegistrator(cb, model_points,
        param1, param2, param3)->run(opoints, ipoints, _local_model, _mask_local_inliers);

    if( result > 0 )
    {
        vector<Point3d> opoints_inliers;
        vector<Point2d> ipoints_inliers;
        opoints = opoints.reshape(3);
        ipoints = ipoints.reshape(2);
        opoints.convertTo(opoints_inliers, CV_64F);
        ipoints.convertTo(ipoints_inliers, CV_64F);

        const uchar* mask = _mask_local_inliers.ptr<uchar>();
        int npoints1 = compressElems(&opoints_inliers[0], mask, 1, npoints);
        compressElems(&ipoints_inliers[0], mask, 1, npoints);

        opoints_inliers.resize(npoints1);
        ipoints_inliers.resize(npoints1);
        result = solvePnP(opoints_inliers, ipoints_inliers, cameraMatrix,
                          distCoeffs, rvec, tvec, false,
                          (flags == SOLVEPNP_P3P || flags == SOLVEPNP_AP3P) ? SOLVEPNP_EPNP : flags) ? 1 : -1;
    }

    if( result <= 0 || _local_model.rows <= 0)
    {
        _rvec.assign(rvec);    // output rotation vector
        _tvec.assign(tvec);    // output translation vector

        if( _inliers.needed() )
            _inliers.release();

        return false;
    }
    else
    {
        _rvec.assign(_local_model.col(0));    // output rotation vector
        _tvec.assign(_local_model.col(1));    // output translation vector
    }

    if(_inliers.needed())
    {
        Mat _local_inliers;
        for (int i = 0; i < npoints; ++i)
        {
            if((int)_mask_local_inliers.at<uchar>(i) != 0) // inliers mask
                _local_inliers.push_back(i);    // output inliers vector
        }
        _local_inliers.copyTo(_inliers);
    }
    return true;
}
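A hedged calling sketch; the correspondences and intrinsics are assumptions. _inliers is filled, or released on failure, only because the caller passes a real array.

cv::Mat rvec, tvec;
std::vector<int> inliers;
bool ok = cv::solvePnPRansac(objPts, imgPts, K, distCoeffs, rvec, tvec,
                             false, 100, 8.f, 0.99, inliers);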
Example #25
bool cv::solvePnPRansac(InputArray _opoints, InputArray _ipoints,
                        InputArray _cameraMatrix, InputArray _distCoeffs,
                        OutputArray _rvec, OutputArray _tvec, bool useExtrinsicGuess,
                        int iterationsCount, float reprojectionError, double confidence,
                        OutputArray _inliers, int flags)
{

    Mat opoints = _opoints.getMat(), ipoints = _ipoints.getMat();

    int npoints = std::max(opoints.checkVector(3, CV_32F), opoints.checkVector(3, CV_64F));
    CV_Assert( npoints >= 0 && npoints == std::max(ipoints.checkVector(2, CV_32F), ipoints.checkVector(2, CV_64F)) );

    CV_Assert(opoints.isContinuous());
    CV_Assert(opoints.depth() == CV_32F || opoints.depth() == CV_64F);
    CV_Assert((opoints.rows == 1 && opoints.channels() == 3) || opoints.cols*opoints.channels() == 3);
    CV_Assert(ipoints.isContinuous());
    CV_Assert(ipoints.depth() == CV_32F || ipoints.depth() == CV_64F);
    CV_Assert((ipoints.rows == 1 && ipoints.channels() == 2) || ipoints.cols*ipoints.channels() == 2);

    _rvec.create(3, 1, CV_64FC1);
    _tvec.create(3, 1, CV_64FC1);

    Mat rvec = useExtrinsicGuess ? _rvec.getMat() : Mat(3, 1, CV_64FC1);
    Mat tvec = useExtrinsicGuess ? _tvec.getMat() : Mat(3, 1, CV_64FC1);
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();

    Ptr<PointSetRegistrator::Callback> cb; // pointer to callback
    cb = makePtr<PnPRansacCallback>( cameraMatrix, distCoeffs, flags, useExtrinsicGuess, rvec, tvec);

    int model_points = 4;                             // minimum of number of model points
    if( flags == cv::SOLVEPNP_ITERATIVE ) model_points = 6;
    else if( flags == cv::SOLVEPNP_UPNP ) model_points = 6;
    else if( flags == cv::SOLVEPNP_EPNP ) model_points = 5;

    double param1 = reprojectionError;                // reprojection error
    double param2 = confidence;                       // confidence
    int param3 = iterationsCount;                     // number maximum iterations

    cv::Mat _local_model(3, 2, CV_64FC1);
    cv::Mat _mask_local_inliers(1, opoints.rows, CV_8UC1);

    // call Ransac
    int result = createRANSACPointSetRegistrator(cb, model_points, param1, param2, param3)->run(opoints, ipoints, _local_model, _mask_local_inliers);

    if( result <= 0 || _local_model.rows <= 0)
    {
        _rvec.assign(rvec);    // output rotation vector
        _tvec.assign(tvec);    // output translation vector

        if( _inliers.needed() )
            _inliers.release();

        return false;
    }
    else
    {
        _rvec.assign(_local_model.col(0));    // output rotation vector
        _tvec.assign(_local_model.col(1));    // output translation vector
    }

    if(_inliers.needed())
    {
        Mat _local_inliers;
        for (int i = 0; i < npoints; ++i)
        {
            if((int)_mask_local_inliers.at<uchar>(i) == 1) // inliers mask
                _local_inliers.push_back(i);    // record the index of each inlier
        }
        _local_inliers.copyTo(_inliers);
    }
    return true;
}
Example #26
float cv::intersectConvexConvex( InputArray _p1, InputArray _p2, OutputArray _p12, bool handleNested )
{
    CV_INSTRUMENT_REGION();

    Mat p1 = _p1.getMat(), p2 = _p2.getMat();
    CV_Assert( p1.depth() == CV_32S || p1.depth() == CV_32F );
    CV_Assert( p2.depth() == CV_32S || p2.depth() == CV_32F );

    int n = p1.checkVector(2, p1.depth(), true);
    int m = p2.checkVector(2, p2.depth(), true);

    CV_Assert( n >= 0 && m >= 0 );

    if( n < 2 || m < 2 )
    {
        _p12.release();
        return 0.f;
    }

    AutoBuffer<Point2f> _result(n*2 + m*2 + 1);
    Point2f *fp1 = _result.data(), *fp2 = fp1 + n;
    Point2f* result = fp2 + m;
    int orientation = 0;

    for( int k = 1; k <= 2; k++ )
    {
        Mat& p = k == 1 ? p1 : p2;
        int len = k == 1 ? n : m;
        Point2f* dst = k == 1 ? fp1 : fp2;

        Mat temp(p.size(), CV_MAKETYPE(CV_32F, p.channels()), dst);
        p.convertTo(temp, CV_32F);
        CV_Assert( temp.ptr<Point2f>() == dst );
        Point2f diff0 = dst[0] - dst[len-1];
        for( int i = 1; i < len; i++ )
        {
            double s = diff0.cross(dst[i] - dst[i-1]);
            if( s != 0 )
            {
                if( s < 0 )
                {
                    orientation++;
                    flip( temp, temp, temp.rows > 1 ? 0 : 1 );
                }
                break;
            }
        }
    }

    float area = 0.f;
    int nr = intersectConvexConvex_(fp1, n, fp2, m, result, &area);
    if( nr == 0 )
    {
        if( !handleNested )
        {
            _p12.release();
            return 0.f;
        }

        if( pointPolygonTest(_InputArray(fp1, n), fp2[0], false) >= 0 )
        {
            result = fp2;
            nr = m;
        }
        else if( pointPolygonTest(_InputArray(fp2, m), fp1[0], false) >= 0 )
        {
            result = fp1;
            nr = n;
        }
        else
        {
            _p12.release();
            return 0.f;
        }
        area = (float)contourArea(_InputArray(result, nr), false);
    }

    if( _p12.needed() )
    {
        Mat temp(nr, 1, CV_32FC2, result);
        // if both input contours were reflected,
        // let's orient the result as the input vectors
        if( orientation == 2 )
            flip(temp, temp, 0);

        temp.copyTo(_p12);
    }
    return (float)fabs(area);
}
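A hedged sketch; the polygons are assumptions. Pass cv::noArray() when only the area matters; the intersection polygon is then never copied out.

float area = cv::intersectConvexConvex(poly1, poly2, cv::noArray());
std::vector<cv::Point2f> inter;
float area2 = cv::intersectConvexConvex(poly1, poly2, inter, true); // polygon wanted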
Example #27
void SIFT::operator()(InputArray _image, InputArray _mask,
                      vector<KeyPoint>& keypoints,
                      OutputArray _descriptors,
                      bool useProvidedKeypoints) const
{
    int firstOctave = -1, actualNOctaves = 0, actualNLayers = 0;
    Mat image = _image.getMat(), mask = _mask.getMat();

    if( image.empty() || image.depth() != CV_8U )
        CV_Error( CV_StsBadArg, "image is empty or has incorrect depth (!=CV_8U)" );

    if( !mask.empty() && mask.type() != CV_8UC1 )
        CV_Error( CV_StsBadArg, "mask has incorrect type (!=CV_8UC1)" );

    if( useProvidedKeypoints )
    {
        firstOctave = 0;
        int maxOctave = INT_MIN;
        for( size_t i = 0; i < keypoints.size(); i++ )
        {
            int octave, layer;
            float scale;
            unpackOctave(keypoints[i], octave, layer, scale);
            firstOctave = std::min(firstOctave, octave);
            maxOctave = std::max(maxOctave, octave);
            actualNLayers = std::max(actualNLayers, layer-2);
        }

        firstOctave = std::min(firstOctave, 0);
        CV_Assert( firstOctave >= -1 && actualNLayers <= nOctaveLayers );
        actualNOctaves = maxOctave - firstOctave + 1;
    }

    Mat base = createInitialImage(image, firstOctave < 0, (float)sigma);
    vector<Mat> gpyr, dogpyr;
    int nOctaves = actualNOctaves > 0 ? actualNOctaves : cvRound(log( (double)std::min( base.cols, base.rows ) ) / log(2.) - 2) - firstOctave;

    //double t, tf = getTickFrequency();
    //t = (double)getTickCount();
    buildGaussianPyramid(base, gpyr, nOctaves);
    buildDoGPyramid(gpyr, dogpyr);

    //t = (double)getTickCount() - t;
    //printf("pyramid construction time: %g\n", t*1000./tf);

    if( !useProvidedKeypoints )
    {
        //t = (double)getTickCount();
        findScaleSpaceExtrema(gpyr, dogpyr, keypoints);
        KeyPointsFilter::removeDuplicated( keypoints );

        if( nfeatures > 0 )
            KeyPointsFilter::retainBest(keypoints, nfeatures);
        //t = (double)getTickCount() - t;
        //printf("keypoint detection time: %g\n", t*1000./tf);

        if( firstOctave < 0 )
            for( size_t i = 0; i < keypoints.size(); i++ )
            {
                KeyPoint& kpt = keypoints[i];
                float scale = 1.f/(float)(1 << -firstOctave);
                kpt.octave = (kpt.octave & ~255) | ((kpt.octave + firstOctave) & 255);
                kpt.pt *= scale;
                kpt.size *= scale;
            }

        if( !mask.empty() )
            KeyPointsFilter::runByPixelsMask( keypoints, mask );
    }
    else
    {
        // filter keypoints by mask
        //KeyPointsFilter::runByPixelsMask( keypoints, mask );
    }

    if( _descriptors.needed() )
    {
        //t = (double)getTickCount();
        int dsize = descriptorSize();
        _descriptors.create((int)keypoints.size(), dsize, CV_32F);
        Mat descriptors = _descriptors.getMat();

        calcDescriptors(gpyr, keypoints, descriptors, nOctaveLayers, firstOctave);
        //t = (double)getTickCount() - t;
        //printf("descriptor extraction time: %g\n", t*1000./tf);
    }
}
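A hedged sketch of the legacy 2.4-style nonfree call; the image is an assumption. Omitting the descriptor output limits the operator to detection.

cv::SIFT sift;
std::vector<cv::KeyPoint> kps;
sift(gray, cv::noArray(), kps, cv::noArray(), false); // _descriptors.needed() == false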
Example #28
bool CustomPattern::findPatternPass(const Mat& image, vector<Point2f>& matched_features, vector<Point3f>& pattern_points,
                                    Mat& H, vector<Point2f>& scene_corners, const double pratio, const double proj_error,
                                    const bool refine_position, const Mat& mask, OutputArray output)
{
    if (!initialized) {return false; }
    matched_features.clear();
    pattern_points.clear();

    vector<vector<DMatch> > matches;
    vector<KeyPoint> f_keypoints;
    Mat f_descriptor;

    detector->detect(image, f_keypoints, mask);
    if (refine_position) refineKeypointsPos(image, f_keypoints);

    descriptorExtractor->compute(image, f_keypoints, f_descriptor);
    descriptorMatcher->knnMatch(f_descriptor, descriptor, matches, 2); // k = 2;
    vector<DMatch> good_matches;
    vector<Point2f> obj_points;

    for(int i = 0; i < f_descriptor.rows; ++i)
    {
        if(matches[i][0].distance < pratio * matches[i][1].distance)
        {
            const DMatch& dm = matches[i][0];
            good_matches.push_back(dm);
            // "keypoints1[matches[i].queryIdx] has a corresponding point in keypoints2[matches[i].trainIdx]"
            matched_features.push_back(f_keypoints[dm.queryIdx].pt);
            pattern_points.push_back(points3d[dm.trainIdx]);
            obj_points.push_back(keypoints[dm.trainIdx].pt);
        }
    }

    if (good_matches.size() < MIN_POINTS_FOR_H) return false;

    Mat h_mask;
    H = findHomography(obj_points, matched_features, RANSAC, proj_error, h_mask);
    if (H.empty())
    {
        // cout << "findHomography() returned empty Mat." << endl;
        return false;
    }

    for(unsigned int i = 0; i < good_matches.size(); ++i)
    {
        if(!h_mask.data[i])
        {
            deleteStdVecElem(good_matches, i);
            deleteStdVecElem(matched_features, i);
            deleteStdVecElem(pattern_points, i);
        }
    }

    if (good_matches.empty()) return false;

    uint numb_elem = good_matches.size();
    check_matches(matched_features, obj_points, good_matches, pattern_points, H);
    if (good_matches.empty() || numb_elem < good_matches.size()) return false;

    // Get the corners from the image
    scene_corners = vector<Point2f>(4);
    perspectiveTransform(obj_corners, scene_corners, H);

    // Check correctness of H
    // Is it a convex hull?
    bool cConvex = isContourConvex(scene_corners);
    if (!cConvex) return false;

    // Is the hull too large or small?
    double scene_area = contourArea(scene_corners);
    if (scene_area < MIN_CONTOUR_AREA_PX) return false;
    double ratio = scene_area/img_roi.size().area();
    if ((ratio < MIN_CONTOUR_AREA_RATIO) ||
        (ratio > MAX_CONTOUR_AREA_RATIO)) return false;

    // Is any of the projected points outside the hull?
    for(unsigned int i = 0; i < good_matches.size(); ++i)
    {
        if(pointPolygonTest(scene_corners, f_keypoints[good_matches[i].queryIdx].pt, false) < 0)
        {
            deleteStdVecElem(good_matches, i);
            deleteStdVecElem(matched_features, i);
            deleteStdVecElem(pattern_points, i);
        }
    }

    if (output.needed())
    {
        Mat out;
        drawMatches(image, f_keypoints, img_roi, keypoints, good_matches, out);
        // Draw lines between the corners (the mapped object in the scene - image_2 )
        line(out, scene_corners[0], scene_corners[1], Scalar(0, 255, 0), 2);
        line(out, scene_corners[1], scene_corners[2], Scalar(0, 255, 0), 2);
        line(out, scene_corners[2], scene_corners[3], Scalar(0, 255, 0), 2);
        line(out, scene_corners[3], scene_corners[0], Scalar(0, 255, 0), 2);
        out.copyTo(output);
    }

    return (!good_matches.empty()); // return true if there are enough good matches
}
Example #29
bool EM::doTrain(int startStep, OutputArray logLikelihoods, OutputArray labels, OutputArray probs)
{
    int dim = trainSamples.cols;
    // Precompute the empty initial train data in the cases of EM::START_E_STEP and START_AUTO_STEP
    if(startStep != EM::START_M_STEP)
    {
        if(covs.empty())
        {
            CV_Assert(weights.empty());
            clusterTrainSamples();
        }
    }

    if(!covs.empty() && covsEigenValues.empty() )
    {
        CV_Assert(invCovsEigenValues.empty());
        decomposeCovs();
    }

    if(startStep == EM::START_M_STEP)
        mStep();

    double trainLogLikelihood, prevTrainLogLikelihood = 0.;
    for(int iter = 0; ; iter++)
    {
        eStep();
        trainLogLikelihood = sum(trainLogLikelihoods)[0];

        if(iter >= maxIters - 1)
            break;

        double trainLogLikelihoodDelta = trainLogLikelihood - prevTrainLogLikelihood;
        if( iter != 0 &&
            (trainLogLikelihoodDelta < -DBL_EPSILON ||
             trainLogLikelihoodDelta < epsilon * std::fabs(trainLogLikelihood)))
            break;

        mStep();

        prevTrainLogLikelihood = trainLogLikelihood;
    }

    if( trainLogLikelihood <= -DBL_MAX/10000. )
    {
        clear();
        return false;
    }

    // postprocess covs
    covs.resize(nclusters);
    for(int clusterIndex = 0; clusterIndex < nclusters; clusterIndex++)
    {
        if(covMatType == EM::COV_MAT_SPHERICAL)
        {
            covs[clusterIndex].create(dim, dim, CV_64FC1);
            setIdentity(covs[clusterIndex], Scalar(covsEigenValues[clusterIndex].at<double>(0)));
        }
        else if(covMatType == EM::COV_MAT_DIAGONAL)
        {
            covs[clusterIndex] = Mat::diag(covsEigenValues[clusterIndex]);
        }
    }
    
    if(labels.needed())
        trainLabels.copyTo(labels);
    if(probs.needed())
        trainProbs.copyTo(probs);
    if(logLikelihoods.needed())
        trainLogLikelihoods.copyTo(logLikelihoods);
    
    trainSamples.release();
    trainProbs.release();
    trainLabels.release();
    trainLogLikelihoods.release();

    return true;
}
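A hedged sketch against the public EM front end that drives doTrain(); the sample matrix is an assumption. Each side output is copied only when the caller requests it.

cv::Ptr<cv::ml::EM> em = cv::ml::EM::create();
em->setClustersNumber(3);
cv::Mat labels;
em->trainEM(samples, cv::noArray(), labels, cv::noArray()); // labels only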
Example #30
    void KAZE::operator()(InputArray _image, InputArray _mask, vector<KeyPoint>& _keypoints,
        OutputArray _descriptors, bool useProvidedKeypoints) const
    {

        bool do_keypoints = !useProvidedKeypoints;
        bool do_descriptors = _descriptors.needed();

        if( (!do_keypoints && !do_descriptors) || _image.empty() )
            return;
        
        cv::Mat img1_8, img1_32;

            // Convert to grayscale image and float image
        if (_image.getMat().channels() == 3)
            cv::cvtColor(_image, img1_8, CV_RGB2GRAY);
        else
            _image.getMat().copyTo(img1_8);

        img1_8.convertTo(img1_32, CV_32F, 1.0/255.0,0);

        // Construct KAZE
        toptions opt = options;
        opt.img_width = img1_32.cols;
        opt.img_height = img1_32.rows;

        ::KAZE kazeEvolution(opt);

        // Create nonlinear scale space
        kazeEvolution.Create_Nonlinear_Scale_Space(img1_32);        

        // Feature detection
        std::vector<Ipoint> kazePoints;

        if (do_keypoints)
        {
            kazeEvolution.Feature_Detection(kazePoints);
            filterDuplicated(kazePoints);

            if (!_mask.empty())
            {
                filterByPixelsMask(kazePoints, _mask.getMat());
            }

            if (opt.nfeatures > 0)
            {
                filterRetainBest(kazePoints, opt.nfeatures);
            }
            
        }
        else
        {
            kazePoints.resize(_keypoints.size());

            #pragma omp parallel for
            for (int i = 0; i < (int)kazePoints.size(); i++)
            {
                convertPoint(_keypoints[i], kazePoints[i]);    
            }
        }
        
        // Descriptor calculation
        if (do_descriptors)
        {
            kazeEvolution.Feature_Description(kazePoints);

            cv::Mat& descriptors = _descriptors.getMatRef();
            descriptors.create((int)kazePoints.size(), descriptorSize(), descriptorType());

            for (size_t i = 0; i < kazePoints.size(); i++)
            {
                std::copy(kazePoints[i].descriptor.begin(), kazePoints[i].descriptor.end(), (float*)descriptors.row(i).data);
            }
        }

        // Transfer from KAZE::Ipoint to cv::KeyPoint
        if (do_keypoints)
        {
            _keypoints.resize(kazePoints.size());

            #pragma omp parallel for
            for (int i = 0; i < (int)kazePoints.size(); i++)
            {
                convertPoint(kazePoints[i], _keypoints[i]);
            }
        }
        
    }
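For comparison, a hedged sketch of the mainline KAZE API; the image is an assumption. Requesting no descriptors makes do_descriptors false, exactly as in the wrapper above.

cv::Ptr<cv::KAZE> kaze = cv::KAZE::create();
std::vector<cv::KeyPoint> kps;
kaze->detectAndCompute(gray, cv::noArray(), kps, cv::noArray()); // detection only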