GuidedFilterRefImpl::GuidedFilterRefImpl(InputArray _guide, int _rad, double _eps) :
  height(_guide.rows()), width(_guide.cols()), rad(_rad), chNum(_guide.channels()), eps(_eps)
{
    Mat guide = _guide.getMat();
    CV_Assert(chNum > 0 && chNum <= 3);

    channels = new Mat[chNum];
    exps     = new Mat[chNum];

    A    = new Mat *[chNum];
    vars = new Mat *[chNum];
    for (int i = 0; i < chNum; ++i)
    {
        A[i]    = new Mat[chNum];
        vars[i] = new Mat[chNum];
    }

    split(guide, channels);
    for (int i = 0; i < chNum; ++i)
    {
        channels[i].convertTo(channels[i], CV_32F);
        meanFilter(channels[i], exps[i]);
    }

    computeCovGuide();

    computeCovGuideInv();
}
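The constructor above precomputes per-channel means and covariance terms for a guided-filter reference implementation. Below is a minimal usage sketch of the corresponding public filter, assuming opencv_contrib's ximgproc module is available; the file names and the radius/eps values are placeholders.

#include <opencv2/opencv.hpp>
#include <opencv2/ximgproc.hpp>

int main()
{
    // Hypothetical inputs: a guidance image and the image to be filtered.
    cv::Mat guide = cv::imread("guide.png");
    cv::Mat src   = cv::imread("input.png");
    if (guide.empty() || src.empty()) return 1;

    cv::Mat dst;
    // radius and eps are illustrative; eps is scaled for 8-bit data
    cv::ximgproc::guidedFilter(guide, src, dst, /*radius=*/8, /*eps=*/0.02 * 255 * 255);
    cv::imwrite("filtered.png", dst);
    return 0;
}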
Example #2
void guiNLMUpsample(InputArray srcimage, OutputArray dest, int resizeFactor, InputArray ref)
{
	string windowName = "weighted mode";
	namedWindow(windowName);
	Mat src = srcimage.getMat();

	int alpha = 0; createTrackbar("a",windowName, &alpha, 100);
	int sw = 0; createTrackbar("sw",windowName, &sw, 1);
	int sw2 = 0; createTrackbar("sw2",windowName, &sw2, 1);

	int tr = 0; createTrackbar("tr",windowName, &tr, 10);
	int sr = 3; createTrackbar("sr",windowName, &sr, 30);
	int h = 100; createTrackbar("h/10",windowName, &h, 255);
	int iter = 2; createTrackbar("iteration",windowName, &iter, 10);

	int key = 0;
	
	while(key!='q')
	{
		Mat srctemp;
		{
			src.copyTo(srctemp);
			CalcTime t;
			for(int i=0;i<iter;i++)
			{
				Mat tmp = srctemp.clone();
				if(srcimage.channels()==1) fastNlMeansDenoising(srctemp, tmp, h/10.f, 2*tr+1, 2*sr+1);
				else fastNlMeansDenoisingColored(srctemp, tmp, (float)h/10.f,(float)h/10.f, 2*tr+1, 2*sr+1);

				tmp.copyTo(srctemp);
			}

			//upsampling function
			if(sw==0) hqx(srctemp, dest, resizeFactor);
			else resize(srctemp, dest, Size(src.cols*resizeFactor, src.rows*resizeFactor), 0,0, INTER_LANCZOS4);
			
		}
		//shock filter
		if(sw2!=0)
		{
			Mat a = dest.getMat();
			blurRemoveMinMax(a,a,2);
			a.copyTo(dest);
		}
		//blend the reference image for debugging
		alphaBlend(ref, dest, alpha/100.0, dest);
		imshow(windowName, dest);
		key = waitKey(30);
		if(key=='f')
		{
			alpha = (alpha != 0) ? 0:100;
			setTrackbarPos("a", windowName, alpha);
		}
	}
	destroyWindow(windowName);
}
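The interactive demo above leans on helpers from its host library (hqx, alphaBlend, blurRemoveMinMax, CalcTime). Below is a non-interactive sketch of its core denoise-then-upsample step using only stock OpenCV calls; the file names, filter strength, and resize factor are placeholders.

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat src = cv::imread("lowres.png");
    if (src.empty()) return 1;

    // Non-local means denoising (h = filter strength for luma and chroma).
    cv::Mat denoised;
    cv::fastNlMeansDenoisingColored(src, denoised, 10.0f, 10.0f, 7, 21);

    // Upsample with Lanczos interpolation, mirroring the resize() branch above.
    const int resizeFactor = 2;
    cv::Mat up;
    cv::resize(denoised, up, cv::Size(src.cols * resizeFactor, src.rows * resizeFactor),
               0, 0, cv::INTER_LANCZOS4);
    cv::imwrite("upsampled.png", up);
    return 0;
}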
Example #3
static bool convolve_32F(InputArray _image, InputArray _templ, OutputArray _result)
{
    _result.create(_image.rows() - _templ.rows() + 1, _image.cols() - _templ.cols() + 1, CV_32F);

    if (_image.channels() == 1)
        return(convolve_dft(_image, _templ, _result));
    else
    {
        UMat image = _image.getUMat();
        UMat templ = _templ.getUMat();
        UMat result_(image.rows-templ.rows+1,(image.cols-templ.cols+1)*image.channels(), CV_32F);
        bool ok = convolve_dft(image.reshape(1), templ.reshape(1), result_);
        if (!ok)
            return false;
        return extractFirstChannel_32F(result_, _result, _image.channels());
    }
}
Example #4
void cv::GlArrays::setColorArray(InputArray color, bool bgra)
{
    int cn = color.channels();

    CV_Assert((cn == 3 && !bgra) || cn == 4);

    color_.copyFrom(color);
    bgra_ = bgra;
}
Example #5
 void calcBtvRegularization(InputArray _src, OutputArray _dst, int btvKernelSize,
                            const std::vector<float>& btvWeights, const UMat & ubtvWeights)
 {
     CV_OCL_RUN(_dst.isUMat(),
                ocl_calcBtvRegularization(_src, _dst, btvKernelSize, ubtvWeights))
     CV_UNUSED(ubtvWeights);
     if (_src.channels() == 1)
     {
         calcBtvRegularizationImpl<float>(_src, _dst, btvKernelSize, btvWeights);
     }
     else if (_src.channels() == 3)
     {
         calcBtvRegularizationImpl<Point3f>(_src, _dst, btvKernelSize, btvWeights);
     }
     else
     {
         CV_Error(Error::StsBadArg, "Unsupported number of channels in _src");
     }
 }
Example #6
void cv::GlArrays::setTexCoordArray(InputArray texCoord)
{
    int cn = texCoord.channels();
    int depth = texCoord.depth();

    CV_Assert(cn >= 1 && cn <= 4);
    CV_Assert(depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F);

    texCoord_.copyFrom(texCoord);
}
Example #7
void cv::GlArrays::setNormalArray(InputArray normal)
{
    int cn = normal.channels();
    int depth = normal.depth();

    CV_Assert(cn == 3);
    CV_Assert(depth == CV_8S || depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F);

    normal_.copyFrom(normal);
}
Example #8
void cv::GlArrays::setVertexArray(InputArray vertex)
{
    int cn = vertex.channels();
    int depth = vertex.depth();

    CV_Assert(cn == 2 || cn == 3 || cn == 4);
    CV_Assert(depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F);

    vertex_.copyFrom(vertex);
}
Example #9
void cv::ogl::Arrays::setColorArray(InputArray color)
{
    const int cn = color.channels();

    CV_Assert( cn == 3 || cn == 4 );

    if (color.kind() == _InputArray::OPENGL_BUFFER)
        color_ = color.getOGlBuffer();
    else
        color_.copyFrom(color);
}
Example #10
void compareRange(InputArray src, OutputArray destMask, const double validMin, const double validMax)
{
    Mat gray;
    if (src.channels() == 1) gray = src.getMat();
    else cvtColor(src, gray, COLOR_BGR2GRAY);

    Mat mask1;
    Mat mask2;
    compare(gray, validMin, mask1, cv::CMP_GE);
    compare(gray, validMax, mask2, cv::CMP_LE);
    bitwise_and(mask1, mask2, destMask);
}
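A short usage sketch for compareRange, assuming the definition above is compiled into the same translation unit (with using namespace cv); the input file and the [10, 200] range are placeholders.

#include <opencv2/opencv.hpp>

// defined above; repeated here only to make the sketch self-contained
void compareRange(cv::InputArray src, cv::OutputArray destMask,
                  const double validMin, const double validMax);

int main()
{
    cv::Mat img = cv::imread("input.png");
    if (img.empty()) return 1;

    // 255 where the (gray) pixel value lies in [10, 200], 0 elsewhere.
    cv::Mat mask;
    compareRange(img, mask, 10.0, 200.0);
    cv::imwrite("valid_mask.png", mask);
    return 0;
}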
Example #11
static void _prepareImgAndDrawKeypoints( InputArray img1, const std::vector<KeyPoint>& keypoints1,
                                         InputArray img2, const std::vector<KeyPoint>& keypoints2,
                                         InputOutputArray _outImg, Mat& outImg1, Mat& outImg2,
                                         const Scalar& singlePointColor, DrawMatchesFlags flags )
{
    Mat outImg;
    Size img1size = img1.size(), img2size = img2.size();
    Size size( img1size.width + img2size.width, MAX(img1size.height, img2size.height) );
    if( !!(flags & DrawMatchesFlags::DRAW_OVER_OUTIMG) )
    {
        outImg = _outImg.getMat();
        if( size.width > outImg.cols || size.height > outImg.rows )
            CV_Error( Error::StsBadSize, "outImg has size less than need to draw img1 and img2 together" );
        outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
    }
    else
    {
        const int cn1 = img1.channels(), cn2 = img2.channels();
        const int out_cn = std::max(3, std::max(cn1, cn2));
        _outImg.create(size, CV_MAKETYPE(img1.depth(), out_cn));
        outImg = _outImg.getMat();
        outImg = Scalar::all(0);
        outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );

        _prepareImage(img1, outImg1);
        _prepareImage(img2, outImg2);
    }

    // draw keypoints
    if( !(flags & DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS) )
    {
        Mat _outImg1 = outImg( Rect(0, 0, img1size.width, img1size.height) );
        drawKeypoints( _outImg1, keypoints1, _outImg1, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );

        Mat _outImg2 = outImg( Rect(img1size.width, 0, img2size.width, img2size.height) );
        drawKeypoints( _outImg2, keypoints2, _outImg2, singlePointColor, flags | DrawMatchesFlags::DRAW_OVER_OUTIMG );
    }
}
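This helper builds the side-by-side canvas used by the public drawing API. A minimal end-to-end sketch with cv::drawMatches follows; the detector choice and file names are placeholders.

#include <vector>
#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat img1 = cv::imread("left.png",  cv::IMREAD_GRAYSCALE);
    cv::Mat img2 = cv::imread("right.png", cv::IMREAD_GRAYSCALE);
    if (img1.empty() || img2.empty()) return 1;

    // Detect and describe keypoints with ORB.
    cv::Ptr<cv::ORB> orb = cv::ORB::create();
    std::vector<cv::KeyPoint> kp1, kp2;
    cv::Mat desc1, desc2;
    orb->detectAndCompute(img1, cv::noArray(), kp1, desc1);
    orb->detectAndCompute(img2, cv::noArray(), kp2, desc2);

    // Brute-force Hamming matching with cross-check.
    cv::BFMatcher matcher(cv::NORM_HAMMING, true);
    std::vector<cv::DMatch> matches;
    matcher.match(desc1, desc2, matches);

    // drawMatches prepares the output canvas much like the helper above.
    cv::Mat vis;
    cv::drawMatches(img1, kp1, img2, kp2, matches, vis);
    cv::imwrite("matches.png", vis);
    return 0;
}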
Example #12
void cv::ogl::Arrays::setNormalArray(InputArray normal)
{
    const int cn = normal.channels();
    const int depth = normal.depth();

    CV_Assert( cn == 3 );
    CV_Assert( depth == CV_8S || depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );

    if (normal.kind() == _InputArray::OPENGL_BUFFER)
        normal_ = normal.getOGlBuffer();
    else
        normal_.copyFrom(normal);
}
Example #13
void cv::ogl::Arrays::setTexCoordArray(InputArray texCoord)
{
    const int cn = texCoord.channels();
    const int depth = texCoord.depth();

    CV_Assert( cn >= 1 && cn <= 4 );
    CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );

    if (texCoord.kind() == _InputArray::OPENGL_BUFFER)
        texCoord_ = texCoord.getOGlBuffer();
    else
        texCoord_.copyFrom(texCoord);
}
Example #14
void cv::viz::vtkImageMatSource::SetImage(InputArray _image)
{
    CV_Assert(_image.depth() == CV_8U && (_image.channels() == 1 || _image.channels() == 3 || _image.channels() == 4));

    Mat image = _image.getMat();

    this->ImageData->SetDimensions(image.cols, image.rows, 1);
#if VTK_MAJOR_VERSION <= 5
    this->ImageData->SetNumberOfScalarComponents(image.channels());
    this->ImageData->SetScalarTypeToUnsignedChar();
    this->ImageData->AllocateScalars();
#else
    this->ImageData->AllocateScalars(VTK_UNSIGNED_CHAR, image.channels());
#endif

    switch(image.channels())
    {
    case 1: copyGrayImage(image, this->ImageData); break;
    case 3: copyRGBImage (image, this->ImageData); break;
    case 4: copyRGBAImage(image, this->ImageData); break;
    }
    this->ImageData->Modified();
}
Example #15
void cv::ogl::Arrays::setVertexArray(InputArray vertex)
{
    const int cn = vertex.channels();
    const int depth = vertex.depth();

    CV_Assert( cn == 2 || cn == 3 || cn == 4 );
    CV_Assert( depth == CV_16S || depth == CV_32S || depth == CV_32F || depth == CV_64F );

    if (vertex.kind() == _InputArray::OPENGL_BUFFER)
        vertex_ = vertex.getOGlBuffer();
    else
        vertex_.copyFrom(vertex);

    size_ = vertex_.size().area();
}
Example #16
bool myDetector(InputArray image, OutputArray faces, CascadeClassifier *face_cascade)
{
    Mat gray;

    if (image.channels() > 1)
        cvtColor(image, gray, COLOR_BGR2GRAY);
    else
        gray = image.getMat().clone();

    equalizeHist(gray, gray);

    std::vector<Rect> faces_;
    face_cascade->detectMultiScale(gray, faces_, 1.4, 2, CASCADE_SCALE_IMAGE, Size(30, 30));
    Mat(faces_).copyTo(faces);
    return true;
}
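myDetector can also be exercised on its own. A minimal sketch, assuming the definition above is in the same translation unit; the cascade file path and image names are placeholders.

#include <vector>
#include <opencv2/opencv.hpp>

bool myDetector(cv::InputArray image, cv::OutputArray faces, cv::CascadeClassifier *face_cascade);

int main()
{
    cv::CascadeClassifier cascade;
    if (!cascade.load("haarcascade_frontalface_alt.xml")) return 1;  // adjust path as needed

    cv::Mat frame = cv::imread("people.jpg");
    if (frame.empty()) return 1;

    std::vector<cv::Rect> faces;
    myDetector(frame, faces, &cascade);

    for (const cv::Rect& r : faces)
        cv::rectangle(frame, r, cv::Scalar(0, 255, 0), 2);
    cv::imwrite("faces.jpg", frame);
    return 0;
}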
Example #17
    void calcBtvRegularization(InputArray _src, OutputArray _dst, int btvKernelSize,
                               const std::vector<float>& btvWeights, const UMat & ubtvWeights)
    {
        CV_OCL_RUN(_dst.isUMat(),
                   ocl_calcBtvRegularization(_src, _dst, btvKernelSize, ubtvWeights))
        (void)ubtvWeights;

        typedef void (*func_t)(InputArray _src, OutputArray _dst, int btvKernelSize, const std::vector<float>& btvWeights);
        static const func_t funcs[] =
        {
            0, calcBtvRegularizationImpl<float>, 0, calcBtvRegularizationImpl<Point3f>, 0
        };

        const func_t func = funcs[_src.channels()];
        CV_Assert(func != 0);
        func(_src, _dst, btvKernelSize, btvWeights);
    }
Example #18
void AdaptiveManifoldFilterN::initSrcAndJoint(InputArray src_, InputArray joint_)
{
    srcSize = src_.size();
    smallSize = getSmallSize();
    srcCnNum = src_.channels();

    split(src_, srcCn);
    if (src_.depth() != CV_32F)
    {
        for (int i = 0; i < srcCnNum; i++)
            srcCn[i].convertTo(srcCn[i], CV_32F);
    }

    if (joint_.empty() || joint_.getObj() == src_.getObj())
    {
        jointCnNum = srcCnNum;

        if (src_.depth() == CV_32F)
        {
            jointCn = srcCn;
        }
        else
        {
            jointCn.resize(jointCnNum);
            for (int i = 0; i < jointCnNum; i++)
                srcCn[i].convertTo(jointCn[i], CV_32F, getNormalizer(src_.depth()));
        }
    }
    else
    {
        splitChannels(joint_, jointCn);
        
        jointCnNum = (int)jointCn.size();
        int jointDepth = jointCn[0].depth();
        Size jointSize = jointCn[0].size();

        CV_Assert( jointSize == srcSize && (jointDepth == CV_8U || jointDepth == CV_16U || jointDepth == CV_32F) );

        if (jointDepth != CV_32F)
        {
            for (int i = 0; i < jointCnNum; i++)
                jointCn[i].convertTo(jointCn[i], CV_32F, getNormalizer(jointDepth));
        }
    }
}
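initSrcAndJoint prepares the source and joint channels for the adaptive manifold filter, which applications typically reach through the public ximgproc API. A minimal sketch, assuming opencv_contrib's ximgproc module is available; the file names and sigma values are placeholders.

#include <opencv2/opencv.hpp>
#include <opencv2/ximgproc.hpp>

int main()
{
    cv::Mat src   = cv::imread("noisy.png");
    cv::Mat joint = cv::imread("guide.png");   // use src itself if no separate guide is available
    if (src.empty() || joint.empty()) return 1;

    cv::Mat dst;
    // sigma_s (spatial) and sigma_r (range) are illustrative values
    cv::ximgproc::amFilter(joint, src, dst, 16.0, 0.2);
    cv::imwrite("filtered.png", dst);
    return 0;
}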
Example #19
static bool ocl_norm( InputArray _src, int normType, InputArray _mask, double & result )
{
    const ocl::Device & d = ocl::Device::getDefault();

#ifdef __ANDROID__
    if (d.isNVidia())
        return false;
#endif
    const int cn = _src.channels();
    if (cn > 4)
        return false;
    int type = _src.type(), depth = CV_MAT_DEPTH(type);
    bool doubleSupport = d.doubleFPConfig() > 0,
            haveMask = _mask.kind() != _InputArray::NONE;

    if ( !(normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR) ||
         (!doubleSupport && depth == CV_64F))
        return false;

    UMat src = _src.getUMat();

    if (normType == NORM_INF)
    {
        if (!ocl_minMaxIdx(_src, NULL, &result, NULL, NULL, _mask,
                           std::max(depth, CV_32S), depth != CV_8U && depth != CV_16U))
            return false;
    }
    else if (normType == NORM_L1 || normType == NORM_L2 || normType == NORM_L2SQR)
    {
        Scalar sc;
        bool unstype = depth == CV_8U || depth == CV_16U;

        if ( !ocl_sum(haveMask ? src : src.reshape(1), sc, normType == NORM_L2 || normType == NORM_L2SQR ?
                    OCL_OP_SUM_SQR : (unstype ? OCL_OP_SUM : OCL_OP_SUM_ABS), _mask) )
            return false;

        double s = 0.0;
        for (int i = 0; i < (haveMask ? cn : 1); ++i)
            s += sc[i];

        result = normType == NORM_L1 || normType == NORM_L2SQR ? s : std::sqrt(s);
    }

    return true;
}
Example #20
static bool ocl_matchTemplate( InputArray _img, InputArray _templ, OutputArray _result, int method)
{
    int cn = _img.channels();

    if (cn > 4)
        return false;

    typedef bool (*Caller)(InputArray _img, InputArray _templ, OutputArray _result);

    static const Caller callers[] =
    {
        matchTemplate_SQDIFF, matchTemplate_SQDIFF_NORMED, matchTemplate_CCORR,
        matchTemplate_CCORR_NORMED, matchTemplate_CCOEFF, matchTemplate_CCOEFF_NORMED
    };
    const Caller caller = callers[method];

    return caller(_img, _templ, _result);
}
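ocl_matchTemplate dispatches on the comparison method; applications reach it through cv::matchTemplate. A minimal sketch that locates the best match; the file names are placeholders.

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat img   = cv::imread("scene.png", cv::IMREAD_GRAYSCALE);
    cv::Mat templ = cv::imread("patch.png", cv::IMREAD_GRAYSCALE);
    if (img.empty() || templ.empty()) return 1;

    cv::Mat result;
    cv::matchTemplate(img, templ, result, cv::TM_CCOEFF_NORMED);

    // For TM_CCOEFF_NORMED the best match is the global maximum.
    double maxVal; cv::Point maxLoc;
    cv::minMaxLoc(result, nullptr, &maxVal, nullptr, &maxLoc);
    cv::rectangle(img, cv::Rect(maxLoc, templ.size()), cv::Scalar(255), 2);
    cv::imwrite("match.png", img);
    return 0;
}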
Example #21
    void upscale(InputArray _src, OutputArray _dst, int scale)
    {
        int cn = _src.channels();
        CV_Assert( cn == 1 || cn == 3 || cn == 4 );

        CV_OCL_RUN(_dst.isUMat(),
                   ocl_upscale(_src, _dst, scale))

        typedef void (*func_t)(InputArray src, OutputArray dst, int scale);
        static const func_t funcs[] =
        {
            0, upscaleImpl<float>, 0, upscaleImpl<Point3f>, upscaleImpl<Point4f>
        };

        const func_t func = funcs[cn];
        CV_Assert(func != 0);
        func(_src, _dst, scale);
    }
Example #22
static void _prepareImage(InputArray src, const Mat& dst)
{
    CV_CheckType(src.type(), src.type() == CV_8UC1 || src.type() == CV_8UC3 || src.type() == CV_8UC4, "Unsupported source image");
    CV_CheckType(dst.type(), dst.type() == CV_8UC3 || dst.type() == CV_8UC4, "Unsupported destination image");
    const int src_cn = src.channels();
    const int dst_cn = dst.channels();

    if (src_cn == dst_cn)
        src.copyTo(dst);
    else if (src_cn == 1)
        cvtColor(src, dst, dst_cn == 3 ? COLOR_GRAY2BGR : COLOR_GRAY2BGRA);
    else if (src_cn == 3 && dst_cn == 4)
        cvtColor(src, dst, COLOR_BGR2BGRA);
    else if (src_cn == 4 && dst_cn == 3)
        cvtColor(src, dst, COLOR_BGRA2BGR);
    else
        CV_Error(Error::StsInternal, "");
}
Example #23
static bool ocl_Laplacian5(InputArray _src, OutputArray _dst,
                           const Mat & kd, const Mat & ks, double scale, double delta,
                           int borderType, int depth, int ddepth)
{
    int iscale = cvRound(scale), idelta = cvRound(delta);
    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0,
            floatCoeff = std::fabs(delta - idelta) > DBL_EPSILON || std::fabs(scale - iscale) > DBL_EPSILON;
    int cn = _src.channels(), wdepth = std::max(depth, floatCoeff ? CV_32F : CV_32S), kercn = 1;

    if (!doubleSupport && wdepth == CV_64F)
        return false;

    char cvt[2][40];
    ocl::Kernel k("sumConvert", ocl::imgproc::laplacian5_oclsrc,
                  format("-D srcT=%s -D WT=%s -D dstT=%s -D coeffT=%s -D wdepth=%d "
                         "-D convertToWT=%s -D convertToDT=%s%s",
                         ocl::typeToStr(CV_MAKE_TYPE(depth, kercn)),
                         ocl::typeToStr(CV_MAKE_TYPE(wdepth, kercn)),
                         ocl::typeToStr(CV_MAKE_TYPE(ddepth, kercn)),
                         ocl::typeToStr(wdepth), wdepth,
                         ocl::convertTypeStr(depth, wdepth, kercn, cvt[0]),
                         ocl::convertTypeStr(wdepth, ddepth, kercn, cvt[1]),
                         doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
    if (k.empty())
        return false;

    UMat d2x, d2y;
    sepFilter2D(_src, d2x, depth, kd, ks, Point(-1, -1), 0, borderType);
    sepFilter2D(_src, d2y, depth, ks, kd, Point(-1, -1), 0, borderType);

    UMat dst = _dst.getUMat();

    ocl::KernelArg d2xarg = ocl::KernelArg::ReadOnlyNoSize(d2x),
            d2yarg = ocl::KernelArg::ReadOnlyNoSize(d2y),
            dstarg = ocl::KernelArg::WriteOnly(dst, cn, kercn);

    if (wdepth >= CV_32F)
        k.args(d2xarg, d2yarg, dstarg, (float)scale, (float)delta);
    else
        k.args(d2xarg, d2yarg, dstarg, iscale, idelta);

    size_t globalsize[] = { dst.cols * cn / kercn, dst.rows };
    return k.run(2, globalsize, NULL, false);
}
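The kernel above sums two separable second-derivative responses; the public entry point for this path is cv::Laplacian. A minimal sketch; the ksize and the visualization step are illustrative.

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat src = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
    if (src.empty()) return 1;

    cv::Mat lap, vis;
    cv::Laplacian(src, lap, CV_16S, /*ksize=*/5);
    cv::convertScaleAbs(lap, vis);   // back to 8-bit for saving
    cv::imwrite("laplacian.png", vis);
    return 0;
}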
Example #24
    static bool ocl_calcBtvRegularization(InputArray _src, OutputArray _dst, int btvKernelSize, const UMat & ubtvWeights)
    {
        int cn = _src.channels();
        ocl::Kernel k("calcBtvRegularization", ocl::superres::superres_btvl1_oclsrc,
                      format("-D cn=%d", cn));
        if (k.empty())
            return false;

        UMat src = _src.getUMat();
        _dst.create(src.size(), src.type());
        _dst.setTo(Scalar::all(0));
        UMat dst = _dst.getUMat();

        const int ksize = (btvKernelSize - 1) / 2;

        k.args(ocl::KernelArg::ReadOnlyNoSize(src), ocl::KernelArg::WriteOnly(dst),
              ksize, ocl::KernelArg::PtrReadOnly(ubtvWeights));

        size_t globalsize[2] = { (size_t)src.cols, (size_t)src.rows };
        return k.run(2, globalsize, NULL, false);
    }
Example #25
void cv::Scharr( InputArray _src, OutputArray _dst, int ddepth, int dx, int dy,
                 double scale, double delta, int borderType )
{
    int stype = _src.type(), sdepth = CV_MAT_DEPTH(stype), cn = CV_MAT_CN(stype);
    if (ddepth < 0)
        ddepth = sdepth;
    _dst.create( _src.size(), CV_MAKETYPE(ddepth, cn) );

#ifdef HAVE_TEGRA_OPTIMIZATION
    if (scale == 1.0 && delta == 0)
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        if (tegra::scharr(src, dst, dx, dy, borderType))
            return;
    }
#endif

#if defined (HAVE_IPP) && (IPP_VERSION_MAJOR >= 7)
    if(dx < 2 && dy < 2 && _src.channels() == 1 && borderType == 1)
    {
        Mat src = _src.getMat(), dst = _dst.getMat();
        if(IPPDerivScharr(src, dst, ddepth, dx, dy, scale))
            return;
    }
#endif
    int ktype = std::max(CV_32F, std::max(ddepth, sdepth));

    Mat kx, ky;
    getScharrKernels( kx, ky, dx, dy, false, ktype );
    if( scale != 1 )
    {
        // usually the smoothing part is the slowest to compute,
        // so try to scale it instead of the faster differentiating part
        if( dx == 0 )
            kx *= scale;
        else
            ky *= scale;
    }
    sepFilter2D( _src, _dst, ddepth, kx, ky, Point(-1, -1), delta, borderType );
}
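A minimal sketch of calling cv::Scharr for the x and y first derivatives and combining them into a gradient magnitude; file names are placeholders.

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat src = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
    if (src.empty()) return 1;

    cv::Mat gx, gy, mag, vis;
    cv::Scharr(src, gx, CV_32F, 1, 0);   // dx = 1, dy = 0
    cv::Scharr(src, gy, CV_32F, 0, 1);   // dx = 0, dy = 1
    cv::magnitude(gx, gy, mag);
    cv::convertScaleAbs(mag, vis);       // back to 8-bit for saving
    cv::imwrite("scharr_magnitude.png", vis);
    return 0;
}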
Example #26
static bool ipp_Deriv(InputArray _src, OutputArray _dst, int dx, int dy, int ksize, double scale, double delta, int borderType)
{
#ifdef HAVE_IPP_IW
    CV_INSTRUMENT_REGION_IPP()

    ::ipp::IwiSize size(_src.size().width, _src.size().height);
    IppDataType   srcType   = ippiGetDataType(_src.depth());
    IppDataType   dstType   = ippiGetDataType(_dst.depth());
    int           channels  = _src.channels();
    bool          useScale  = false;
    bool          useScharr = false;

    if(channels != _dst.channels() || channels > 1)
        return false;

    if(fabs(delta) > FLT_EPSILON || fabs(scale-1) > FLT_EPSILON)
        useScale = true;

    if(ksize <= 0)
    {
        ksize     = 3;
        useScharr = true;
    }

    IppiMaskSize maskSize = ippiGetMaskSize(ksize, ksize);
    if((int)maskSize < 0)
        return false;

#if IPP_VERSION_X100 <= 201703
    // Bug with mirror wrap
    if(borderType == BORDER_REFLECT_101 && (ksize/2+1 > size.width || ksize/2+1 > size.height))
        return false;
#endif

    IwiDerivativeType derivType = ippiGetDerivType(dx, dy, !useScharr);
    if((int)derivType < 0)
        return false;

    // Acquire data and begin processing
    try
    {
        Mat src = _src.getMat();
        Mat dst = _dst.getMat();
        ::ipp::IwiImage iwSrc      = ippiGetImage(src);
        ::ipp::IwiImage iwDst      = ippiGetImage(dst);
        ::ipp::IwiImage iwSrcProc  = iwSrc;
        ::ipp::IwiImage iwDstProc  = iwDst;
        ::ipp::IwiBorderSize  borderSize(maskSize);
        ::ipp::IwiBorderType  ippBorder(ippiGetBorder(iwSrc, borderType, borderSize));
        if(!ippBorder)
            return false;

        if(srcType == ipp8u && dstType == ipp8u)
        {
            iwDstProc.Alloc(iwDst.m_size, ipp16s, channels);
            useScale = true;
        }
        else if(srcType == ipp8u && dstType == ipp32f)
        {
            iwSrc -= borderSize;
            iwSrcProc.Alloc(iwSrc.m_size, ipp32f, channels);
            CV_INSTRUMENT_FUN_IPP(::ipp::iwiScale, iwSrc, iwSrcProc, 1, 0, ::ipp::IwiScaleParams(ippAlgHintFast));
            iwSrcProc += borderSize;
        }

        if(useScharr)
            CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterScharr, iwSrcProc, iwDstProc, derivType, maskSize, ::ipp::IwDefault(), ippBorder);
        else
            CV_INSTRUMENT_FUN_IPP(::ipp::iwiFilterSobel, iwSrcProc, iwDstProc, derivType, maskSize, ::ipp::IwDefault(), ippBorder);

        if(useScale)
            CV_INSTRUMENT_FUN_IPP(::ipp::iwiScale, iwDstProc, iwDst, scale, delta, ::ipp::IwiScaleParams(ippAlgHintFast));
    }
    catch (const ::ipp::IwException &)
    {
        return false;
    }

    return true;
#else
    CV_UNUSED(_src); CV_UNUSED(_dst); CV_UNUSED(dx); CV_UNUSED(dy); CV_UNUSED(ksize); CV_UNUSED(scale); CV_UNUSED(delta); CV_UNUSED(borderType);
    return false;
#endif
}
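ipp_Deriv is an internal IPP fast path behind cv::Sobel and cv::Scharr. A minimal sketch of the public cv::Sobel call it accelerates; the ksize and file names are placeholders.

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat src = cv::imread("input.png", cv::IMREAD_GRAYSCALE);
    if (src.empty()) return 1;

    cv::Mat dx, dy, adx, ady, vis;
    cv::Sobel(src, dx, CV_16S, 1, 0, /*ksize=*/3);
    cv::Sobel(src, dy, CV_16S, 0, 1, /*ksize=*/3);
    cv::convertScaleAbs(dx, adx);
    cv::convertScaleAbs(dy, ady);
    cv::addWeighted(adx, 0.5, ady, 0.5, 0, vis);   // rough combined edge map
    cv::imwrite("sobel_edges.png", vis);
    return 0;
}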