Example #1
    void detect( InputArray _image, std::vector<KeyPoint>& keypoints, InputArray _mask )
    {
        CV_INSTRUMENT_REGION()

        std::vector<Point2f> corners;

        if (_image.isUMat())
        {
            UMat ugrayImage;
            if( _image.type() != CV_8U )
                cvtColor( _image, ugrayImage, COLOR_BGR2GRAY );
            else
                ugrayImage = _image.getUMat();

            goodFeaturesToTrack( ugrayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
                                 blockSize, useHarrisDetector, k );
        }
        else
        {
            Mat image = _image.getMat(), grayImage = image;
            if( image.type() != CV_8U )
                cvtColor( image, grayImage, COLOR_BGR2GRAY );

            goodFeaturesToTrack( grayImage, corners, nfeatures, qualityLevel, minDistance, _mask,
                                blockSize, useHarrisDetector, k );
        }

        keypoints.resize(corners.size());
        std::vector<Point2f>::const_iterator corner_it = corners.begin();
        std::vector<KeyPoint>::iterator keypoint_it = keypoints.begin();
        for( ; corner_it != corners.end(); ++corner_it, ++keypoint_it )
            *keypoint_it = KeyPoint( *corner_it, (float)blockSize );

    }
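Judging from the parameters it uses (nfeatures, qualityLevel, minDistance, useHarrisDetector, k), the detect() above appears to be OpenCV's GFTTDetector implementation: it branches on _image.isUMat() so that a UMat input stays on the T-API/OpenCL path through goodFeaturesToTrack. A minimal caller-side sketch, assuming OpenCV 3.x with the features2d and imgcodecs modules; "input.png" is a placeholder file name:

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/features2d.hpp>

int main()
{
    cv::UMat image;
    // Loading as grayscale gives CV_8UC1, so detect() skips its cvtColor step;
    // copying into a UMat makes _image.isUMat() true in the code above.
    cv::imread("input.png", cv::IMREAD_GRAYSCALE).copyTo(image);

    std::vector<cv::KeyPoint> keypoints;
    cv::Ptr<cv::GFTTDetector> gftt = cv::GFTTDetector::create();
    gftt->detect(image, keypoints);  // dispatches into the detect() shown above

    return 0;
}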
Example #2
void cv::superres::arrCopy(InputArray src, OutputArray dst)
{
    if (dst.isUMat() || src.isUMat())
    {
        src.copyTo(dst);
        return;
    }

    typedef void (*func_t)(InputArray src, OutputArray dst);
    static const func_t funcs[10][10] =
    {
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, mat2mat, arr2buf, 0, mat2gpu },
        { 0, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, buf2arr, 0, buf2arr },
        { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
        { 0, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, gpu2mat, arr2buf, 0, gpu2gpu },
    };

    const int src_kind = src.kind() >> _InputArray::KIND_SHIFT;
    const int dst_kind = dst.kind() >> _InputArray::KIND_SHIFT;

    CV_Assert( src_kind >= 0 && src_kind < 10 );
    CV_Assert( dst_kind >= 0 && dst_kind < 10 );

    const func_t func = funcs[src_kind][dst_kind];
    CV_Assert( func != 0 );

    func(src, dst);
}
Example #3
static std::vector<Mat> extractMatVector(InputArray in)
{
    if (in.isMat() || in.isUMat())
    {
        return std::vector<Mat>(1, in.getMat());
    }
    else if (in.isMatVector())
    {
        return *static_cast<const std::vector<Mat>*>(in.getObj());
    }
    else if (in.isUMatVector())
    {
        std::vector<Mat> vmat;
        in.getMatVector(vmat);
        return vmat;
    }
    else
    {
        CV_Assert(in.isMat() || in.isMatVector() || in.isUMat() || in.isUMatVector());
        return std::vector<Mat>();
    }
}
Example #4
void cv::fastNlMeansDenoisingColored( InputArray _src, OutputArray _dst,
                                      float h, float hForColorComponents,
                                      int templateWindowSize, int searchWindowSize)
{
    int type = _src.type(), depth = CV_MAT_DEPTH(type), cn = CV_MAT_CN(type);

    if (type != CV_8UC3 && type != CV_8UC4)
    {
        CV_Error(Error::StsBadArg, "Type of input image should be CV_8UC3 or CV_8UC4!");
        return;
    }

    CV_OCL_RUN(_src.dims() <= 2 && (_dst.isUMat() || _src.isUMat()),
                ocl_fastNlMeansDenoisingColored(_src, _dst, h, hForColorComponents,
                                                templateWindowSize, searchWindowSize))

    Mat src = _src.getMat();
    _dst.create(src.size(), type);
    Mat dst = _dst.getMat();

    Mat src_lab;
    cvtColor(src, src_lab, COLOR_LBGR2Lab);

    Mat l(src.size(), CV_8U);
    Mat ab(src.size(), CV_8UC2);
    Mat l_ab[] = { l, ab };
    int from_to[] = { 0,0, 1,1, 2,2 };
    mixChannels(&src_lab, 1, l_ab, 2, from_to, 3);

    fastNlMeansDenoising(l, l, h, templateWindowSize, searchWindowSize);
    fastNlMeansDenoising(ab, ab, hForColorComponents, templateWindowSize, searchWindowSize);

    Mat l_ab_denoised[] = { l, ab };
    Mat dst_lab(src.size(), CV_MAKE_TYPE(depth, 3));
    mixChannels(l_ab_denoised, 2, &dst_lab, 1, from_to, 3);

    cvtColor(dst_lab, dst, COLOR_Lab2LBGR, cn);
}
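Because of the CV_OCL_RUN guard, passing UMat arguments routes the call to ocl_fastNlMeansDenoisingColored when OpenCL is usable; otherwise the Mat path above runs. A minimal usage sketch with the documented defaults (h = 3, hForColorComponents = 3, templateWindowSize = 7, searchWindowSize = 21); the file names are placeholders:

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/photo.hpp>

int main()
{
    cv::UMat src, dst;
    // CV_8UC3 input in a UMat makes the OCL branch eligible
    cv::imread("noisy.png", cv::IMREAD_COLOR).copyTo(src);

    cv::fastNlMeansDenoisingColored(src, dst, 3, 3, 7, 21);

    cv::imwrite("denoised.png", dst);
    return 0;
}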
Example #5
void cv::fastNlMeansDenoising( InputArray _src, OutputArray _dst, float h,
                               int templateWindowSize, int searchWindowSize)
{
    CV_OCL_RUN(_src.dims() <= 2 && (_src.isUMat() || _dst.isUMat()),
               ocl_fastNlMeansDenoising(_src, _dst, h, templateWindowSize, searchWindowSize))

    Mat src = _src.getMat();
    _dst.create(src.size(), src.type());
    Mat dst = _dst.getMat();

#ifdef HAVE_TEGRA_OPTIMIZATION
    if(tegra::fastNlMeansDenoising(src, dst, h, templateWindowSize, searchWindowSize))
        return;
#endif

    switch (src.type()) {
        case CV_8U:
            parallel_for_(cv::Range(0, src.rows),
                FastNlMeansDenoisingInvoker<uchar>(
                    src, dst, templateWindowSize, searchWindowSize, h));
            break;
        case CV_8UC2:
            parallel_for_(cv::Range(0, src.rows),
                FastNlMeansDenoisingInvoker<cv::Vec2b>(
                    src, dst, templateWindowSize, searchWindowSize, h));
            break;
        case CV_8UC3:
            parallel_for_(cv::Range(0, src.rows),
                FastNlMeansDenoisingInvoker<cv::Vec3b>(
                    src, dst, templateWindowSize, searchWindowSize, h));
            break;
        default:
            CV_Error(Error::StsBadArg,
                "Unsupported image format! Only CV_8UC1, CV_8UC2 and CV_8UC3 are supported");
    }
}
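Conversely, when both arguments are plain Mat objects the OCL guard is skipped and one of the FastNlMeansDenoisingInvoker specializations runs under parallel_for_. A small sketch for the CV_8UC1 case (file names are placeholders):

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/photo.hpp>

int main()
{
    cv::Mat src = cv::imread("noisy_gray.png", cv::IMREAD_GRAYSCALE);  // CV_8UC1 -> uchar invoker
    cv::Mat dst;

    cv::fastNlMeansDenoising(src, dst, 3, 7, 21);  // h = 3, template window 7, search window 21

    cv::imwrite("denoised_gray.png", dst);
    return 0;
}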
Example #6
int cv::meanShift( InputArray _probImage, Rect& window, TermCriteria criteria )
{
    CV_INSTRUMENT_REGION()

    Size size;
    int cn;
    Mat mat;
    UMat umat;
    bool isUMat = _probImage.isUMat();

    if (isUMat)
        umat = _probImage.getUMat(), cn = umat.channels(), size = umat.size();
    else
        mat = _probImage.getMat(), cn = mat.channels(), size = mat.size();

    Rect cur_rect = window;

    CV_Assert( cn == 1 );

    if( window.height <= 0 || window.width <= 0 )
        CV_Error( Error::StsBadArg, "Input window has non-positive sizes" );

    window = window & Rect(0, 0, size.width, size.height);

    double eps = (criteria.type & TermCriteria::EPS) ? std::max(criteria.epsilon, 0.) : 1.;
    eps = cvRound(eps*eps);
    int i, niters = (criteria.type & TermCriteria::MAX_ITER) ? std::max(criteria.maxCount, 1) : 100;

    for( i = 0; i < niters; i++ )
    {
        cur_rect = cur_rect & Rect(0, 0, size.width, size.height);
        if( cur_rect == Rect() )
        {
            cur_rect.x = size.width/2;
            cur_rect.y = size.height/2;
        }
        cur_rect.width = std::max(cur_rect.width, 1);
        cur_rect.height = std::max(cur_rect.height, 1);

        Moments m = isUMat ? moments(umat(cur_rect)) : moments(mat(cur_rect));

        // Calculating center of mass
        if( fabs(m.m00) < DBL_EPSILON )
            break;

        int dx = cvRound( m.m10/m.m00 - window.width*0.5 );
        int dy = cvRound( m.m01/m.m00 - window.height*0.5 );

        int nx = std::min(std::max(cur_rect.x + dx, 0), size.width - cur_rect.width);
        int ny = std::min(std::max(cur_rect.y + dy, 0), size.height - cur_rect.height);

        dx = nx - cur_rect.x;
        dy = ny - cur_rect.y;
        cur_rect.x = nx;
        cur_rect.y = ny;

        // Stop when the window shift toward the center of mass is within eps
        if( dx*dx + dy*dy < eps )
            break;
    }

    window = cur_rect;
    return i;
}
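meanShift accepts either a Mat or a UMat probability image; the isUMat() test above only selects which view the per-iteration moments() call uses. A caller-side sketch, assuming backproj is a single-channel back-projection (for example from calcBackProject) and trackWindow is an initial ROI; both names are placeholders:

#include <opencv2/core.hpp>
#include <opencv2/video/tracking.hpp>

// Runs one mean-shift pass; trackWindow is updated in place.
int trackOnce(const cv::UMat& backproj, cv::Rect& trackWindow)
{
    cv::TermCriteria criteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1.0);
    return cv::meanShift(backproj, trackWindow, criteria);  // returns the iteration count
}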
Example #7
cv::RotatedRect cv::CamShift( InputArray _probImage, Rect& window,
                              TermCriteria criteria )
{
    CV_INSTRUMENT_REGION()

    const int TOLERANCE = 10;
    Size size;
    Mat mat;
    UMat umat;
    bool isUMat = _probImage.isUMat();

    if (isUMat)
        umat = _probImage.getUMat(), size = umat.size();
    else
        mat = _probImage.getMat(), size = mat.size();

    meanShift( _probImage, window, criteria );

    window.x -= TOLERANCE;
    if( window.x < 0 )
        window.x = 0;

    window.y -= TOLERANCE;
    if( window.y < 0 )
        window.y = 0;

    window.width += 2 * TOLERANCE;
    if( window.x + window.width > size.width )
        window.width = size.width - window.x;

    window.height += 2 * TOLERANCE;
    if( window.y + window.height > size.height )
        window.height = size.height - window.y;

    // Calculating moments of the expanded search window
    Moments m = isUMat ? moments(umat(window)) : moments(mat(window));

    double m00 = m.m00, m10 = m.m10, m01 = m.m01;
    double mu11 = m.mu11, mu20 = m.mu20, mu02 = m.mu02;

    if( fabs(m00) < DBL_EPSILON )
        return RotatedRect();

    double inv_m00 = 1. / m00;
    int xc = cvRound( m10 * inv_m00 + window.x );
    int yc = cvRound( m01 * inv_m00 + window.y );
    double a = mu20 * inv_m00, b = mu11 * inv_m00, c = mu02 * inv_m00;

    // Calculating width & height
    double square = std::sqrt( 4 * b * b + (a - c) * (a - c) );

    // Calculating orientation
    double theta = atan2( 2 * b, a - c + square );

    // Calculating width & length of the fitted ellipse
    double cs = cos( theta );
    double sn = sin( theta );

    double rotate_a = cs * cs * mu20 + 2 * cs * sn * mu11 + sn * sn * mu02;
    double rotate_c = sn * sn * mu20 - 2 * cs * sn * mu11 + cs * cs * mu02;
    double length = std::sqrt( rotate_a * inv_m00 ) * 4;
    double width = std::sqrt( rotate_c * inv_m00 ) * 4;

    // When theta is near 0 or pi/2 the length & width may be swapped
    if( length < width )
    {
        std::swap( length, width );
        std::swap( cs, sn );
        theta = CV_PI*0.5 - theta;
    }

    // Saving results
    int _xc = cvRound( xc );
    int _yc = cvRound( yc );

    int t0 = cvRound( fabs( length * cs ));
    int t1 = cvRound( fabs( width * sn ));

    t0 = MAX( t0, t1 ) + 2;
    window.width = MIN( t0, (size.width - _xc) * 2 );

    t0 = cvRound( fabs( length * sn ));
    t1 = cvRound( fabs( width * cs ));

    t0 = MAX( t0, t1 ) + 2;
    window.height = MIN( t0, (size.height - _yc) * 2 );

    window.x = MAX( 0, _xc - window.width / 2 );
    window.y = MAX( 0, _yc - window.height / 2 );

    window.width = MIN( size.width - window.x, window.width );
    window.height = MIN( size.height - window.y, window.height );

    RotatedRect box;
    box.size.height = (float)length;
    box.size.width = (float)width;
    box.angle = (float)((CV_PI*0.5+theta)*180./CV_PI);
    while(box.angle < 0)
        box.angle += 360;
    while(box.angle >= 360)
        box.angle -= 360;
    if(box.angle >= 180)
        box.angle -= 180;
    box.center = Point2f( window.x + window.width*0.5f, window.y + window.height*0.5f);

    return box;
}
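CamShift first delegates to meanShift and then grows the window by TOLERANCE and fits an oriented box from the image moments. A companion sketch to the one above, under the same placeholder assumptions about backproj and trackWindow:

#include <opencv2/core.hpp>
#include <opencv2/video/tracking.hpp>

// Refines trackWindow in place and returns the oriented bounding box.
cv::RotatedRect trackCamShift(const cv::Mat& backproj, cv::Rect& trackWindow)
{
    cv::TermCriteria criteria(cv::TermCriteria::EPS | cv::TermCriteria::COUNT, 10, 1.0);
    return cv::CamShift(backproj, trackWindow, criteria);
}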