Example No. 1
bool Saliency::computeSaliency( InputArray image, OutputArray saliencyMap )
{
  if( image.empty() )
    return false;

  return computeSaliencyImpl( image, saliencyMap );
}
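
A minimal usage sketch for this entry point, assuming the opencv_contrib saliency module; the input file name and the SpectralResidual choice are illustrative assumptions, not part of the example above:

#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/saliency.hpp>

int main()
{
    cv::Mat img = cv::imread("input.png");   // hypothetical input path
    cv::Mat saliencyMap;

    // any concrete Saliency subclass routes through computeSaliency() above
    cv::Ptr<cv::saliency::Saliency> alg =
        cv::saliency::StaticSaliencySpectralResidual::create();

    if (!alg->computeSaliency(img, saliencyMap))   // returns false on empty input
        return 1;

    cv::Mat out;
    saliencyMap.convertTo(out, CV_8U, 255.0);      // the map is CV_32F in [0, 1]
    cv::imwrite("saliency.png", out);
    return 0;
}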
Example No. 2
void DepthmapDenoiseWeightedHuberImpl::cacheGValues(InputArray _visibleLightImage){
    using namespace cv::cuda::device::dtam_denoise;
    localStream = cv::cuda::StreamAccessor::getStream(cvStream);
    if (!_visibleLightImage.empty()){
        visibleLightImage=_visibleLightImage.getGpuMat();
        cachedG=0;
    }
    if(cachedG)
        return;//already cached
    if(!alloced)
        allocate(rows,cols);

    // Call the GPU function for caching g's
    loadConstants(rows, cols, 0, 0, 0, 0, 0, 0, 0, 0);
    CV_Assert(_g1.isContinuous());
    float* pp = (float*) visibleLightImage.data;//TODO: write a color version.
    float* g1p = (float*)_g1.data;
    float* gxp = (float*)_gx.data;
    float* gyp = (float*)_gy.data;
    computeGCaller(pp,  g1p,  gxp,  gyp, cols);
    cachedG=1;
}
Example No. 3
    void AKAZE::detectImpl(InputArray image, std::vector<KeyPoint>& keypoints, InputArray mask) const
    {
        cv::Mat img = image.getMat();
        if (img.type() != CV_8UC1)
            cvtColor(image, img, COLOR_BGR2GRAY);

        Mat img1_32;
        img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);

        AKAZEOptions options;
        options.descriptor = static_cast<DESCRIPTOR_TYPE>(descriptor);
        options.descriptor_channels = descriptor_channels;
        options.descriptor_size = descriptor_size;
        options.nsublevels = nsublevels;
        options.dthreshold = dtreshhold;
        options.img_width = img.cols;
        options.img_height = img.rows;

        AKAZEFeatures impl(options);
        impl.Create_Nonlinear_Scale_Space(img1_32);
        impl.Feature_Detection(keypoints);

        if (!mask.empty())
        {
            cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
        }
    }
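
For comparison, the same detection path through the later public Feature2D interface of OpenCV 3.x/4.x; a sketch, with the file name as an assumption:

#include <opencv2/features2d.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main()
{
    cv::Mat img = cv::imread("scene.png", cv::IMREAD_GRAYSCALE); // hypothetical path
    std::vector<cv::KeyPoint> keypoints;

    cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();
    akaze->detect(img, keypoints);   // dispatches to the detect implementation internally
    return 0;
}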
Example No. 4
/*!
Niblack binarization algorithm.

@param src [in] Mat, single channel uchar image.
@param dst [out] Mat, result image.
@param windowSize [in] int, window size for calculation.
@param k [in] float, parameter for local threshold.
@return int, 0x0000 = Success.
*/
int Niblack(InputArray src, OutputArray dst, int windowSize, float k)
{
	if (src.empty() || src.type() != CV_8UC1)	
		return 0x0001;	/*!< source image type not supported. */

	/*! validate the window size, which must be odd. */
	if (windowSize < 2)	
		return 0x0002;	/*!< window size not supported. */
	if (windowSize % 2 == 0)
		windowSize++;

	Mat source, destination;
	Mat sourceUchar = src.getMat();
	sourceUchar.convertTo(source, CV_32FC1);

	/*! calculate mean and standard deviation via
	D(x) = E(x^2) - (E(x))^2 */
	Mat avg, power, avg_power, power_avg;
	Mat standard;
	boxFilter(source, avg, -1, Size(windowSize, windowSize));
	pow(avg, 2, avg_power);
	pow(source, 2, power);
	boxFilter(power, power_avg, -1, Size(windowSize, windowSize));
	sqrt(power_avg - avg_power, standard);

	/*! calculate local threshold */
	Mat threshold = avg + k * standard;

	/*! Output result */
	dst.create(sourceUchar.size(), CV_8UC1);
	destination = dst.getMat();
	compare(source, threshold, destination, CMP_GT);	/*!< write in place; plain '=' would only rebind the local header. */

	return 0x0000;
}
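
Since Niblack() is defined right above, a caller only needs a single-channel 8-bit image; a short sketch, with the file name and parameter values as assumptions:

#include <iostream>
#include <opencv2/imgcodecs.hpp>

int main()
{
    cv::Mat gray = cv::imread("doc.png", cv::IMREAD_GRAYSCALE); // hypothetical path
    cv::Mat binary;
    int rc = Niblack(gray, binary, 25, -0.2f);  // 25x25 window; negative k is typical for Niblack
    if (rc != 0x0000)
        std::cerr << "Niblack failed, code " << rc << "\n";
    return 0;
}

OpenCV's ximgproc module ships a comparable cv::ximgproc::niBlackThreshold() if a maintained implementation is preferred.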
Example No. 5
bool EM::trainE(InputArray samples,
                InputArray _means0,
                InputArray _covs0,
                InputArray _weights0,
                OutputArray logLikelihoods,
                OutputArray labels,
                OutputArray probs)
{
    Mat samplesMat = samples.getMat();
    vector<Mat> covs0;
    _covs0.getMatVector(covs0);
    
    Mat means0 = _means0.getMat(), weights0 = _weights0.getMat();

    setTrainData(START_E_STEP, samplesMat, 0, !_means0.empty() ? &means0 : 0,
                 !_covs0.empty() ? &covs0 : 0, !_weights0.empty() ? &weights0 : 0);
    return doTrain(START_E_STEP, logLikelihoods, labels, probs);
}
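
The modern cv::ml::EM equivalent of this 2.4-era call; a sketch with the cluster count, data shapes, and placeholder data as assumptions:

#include <opencv2/ml.hpp>

int main()
{
    cv::Mat samples(100, 2, CV_32F);              // one row per observation
    cv::randu(samples, 0.0f, 1.0f);               // placeholder data

    cv::Ptr<cv::ml::EM> em = cv::ml::EM::create();
    em->setClustersNumber(3);

    cv::Mat means0(3, 2, CV_64F);                 // initial means: nclusters x dims
    cv::randu(means0, 0.0, 1.0);

    cv::Mat labels;
    em->trainE(samples, means0, cv::noArray(), cv::noArray(),
               cv::noArray(), labels, cv::noArray());  // the E-step starts from means0
    return 0;
}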
Example No. 6
bool CustomPattern::create(InputArray pattern, const Size2f boardSize, OutputArray output)
{
    CV_Assert(!pattern.empty() && (boardSize.area() > 0));

    Mat img = pattern.getMat();
    float pixel_size = (boardSize.width > boardSize.height)?    // Choose the longer side for more accurate calculation
                         float(img.cols) / boardSize.width:     // width is longer
                         float(img.rows) / boardSize.height;    // height is longer
    return init(img, pixel_size, output);
}
Example No. 7
        void detectAndCompute(InputArray image, InputArray mask,
                              std::vector<KeyPoint>& keypoints,
                              OutputArray descriptors,
                              bool useProvidedKeypoints)
        {
            Mat img = image.getMat();

            if (img_width != img.cols) {
                img_width = img.cols;
                impl.release();
            }

            if (img_height != img.rows) {
                img_height = img.rows;
                impl.release();
            }

            if (impl.empty()) {
                AKAZEOptionsV2 options;
                options.descriptor = descriptor;
                options.descriptor_channels = descriptor_channels;
                options.descriptor_size = descriptor_size;
                options.img_width = img_width;
                options.img_height = img_height;
                options.dthreshold = threshold;
                options.omax = octaves;
                options.nsublevels = sublevels;
                options.diffusivity = diffusivity;

                impl = makePtr<AKAZEFeaturesV2>(options);
            }

            impl->Create_Nonlinear_Scale_Space(img);

            if (!useProvidedKeypoints)
            {
                impl->Feature_Detection(keypoints);
            }

            if (!mask.empty())
            {
                KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
            }

            if( descriptors.needed() )
            {
                Mat& desc = descriptors.getMatRef();
                impl->Compute_Descriptors(keypoints, desc);

                CV_Assert((!desc.rows || desc.cols == descriptorSize()));
                CV_Assert((!desc.rows || (desc.type() == descriptorType())));
            }
        }
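
Calling a wrapper like the one above goes through the standard Feature2D entry point; a sketch, assuming img is an 8-bit image already loaded:

std::vector<cv::KeyPoint> kps;
cv::Mat desc;

cv::Ptr<cv::AKAZE> akaze = cv::AKAZE::create();
akaze->detectAndCompute(img, cv::noArray(), kps, desc); // detect and describe in one pass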
Example No. 8
bool EM::trainM(InputArray samples,
                InputArray _probs0,
                OutputArray logLikelihoods,
                OutputArray labels,
                OutputArray probs)
{
    Mat samplesMat = samples.getMat();
    Mat probs0 = _probs0.getMat();
    
    setTrainData(START_M_STEP, samplesMat, !_probs0.empty() ? &probs0 : 0, 0, 0, 0);
    return doTrain(START_M_STEP, logLikelihoods, labels, probs);
}
Example No. 9
        void detectAndCompute(InputArray image, InputArray mask,
                              std::vector<KeyPoint>& keypoints,
                              OutputArray descriptors,
                              bool useProvidedKeypoints)
        {
            Mat img = image.getMat();
            if (img.type() != CV_8UC1 && img.type() != CV_16UC1)
                cvtColor(image, img, COLOR_BGR2GRAY);

            Mat img1_32;
            if ( img.depth() == CV_32F )
                img1_32 = img;
            else if ( img.depth() == CV_8U )
                img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);
            else if ( img.depth() == CV_16U )
                img.convertTo(img1_32, CV_32F, 1.0 / 65535.0, 0);

            CV_Assert( ! img1_32.empty() );

            AKAZEOptions options;
            options.descriptor = descriptor;
            options.descriptor_channels = descriptor_channels;
            options.descriptor_size = descriptor_size;
            options.img_width = img.cols;
            options.img_height = img.rows;
            options.dthreshold = threshold;
            options.omax = octaves;
            options.nsublevels = sublevels;
            options.diffusivity = diffusivity;

            AKAZEFeatures impl(options);
            impl.Create_Nonlinear_Scale_Space(img1_32);

            if (!useProvidedKeypoints)
            {
                impl.Feature_Detection(keypoints);
            }

            if (!mask.empty())
            {
                KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
            }

            if( descriptors.needed() )
            {
                Mat& desc = descriptors.getMatRef();
                impl.Compute_Descriptors(keypoints, desc);

                CV_Assert((!desc.rows || desc.cols == descriptorSize()));
                CV_Assert((!desc.rows || (desc.type() == descriptorType())));
            }
        }
Example No. 10
void DescriptorExtractor::compute( InputArray image, std::vector<KeyPoint>& keypoints, OutputArray descriptors ) const
{
    if( image.empty() || keypoints.empty() )
    {
        descriptors.release();
        return;
    }

    KeyPointsFilter::runByImageBorder( keypoints, image.size(), 0 );
    KeyPointsFilter::runByKeypointSize( keypoints, std::numeric_limits<float>::epsilon() );

    computeImpl( image, keypoints, descriptors );
}
Example No. 11
/*
 * Compute the descriptors for a set of keypoints in an image.
 * image        The image.
 * keypoints    The input keypoints. Keypoints for which a descriptor cannot be computed are removed.
 * descriptors  Computed descriptors. Row i is the descriptor for keypoint i.
 */
void Feature2D::compute( InputArray image,
                         std::vector<KeyPoint>& keypoints,
                         OutputArray descriptors )
{
    CV_INSTRUMENT_REGION();

    if( image.empty() )
    {
        descriptors.release();
        return;
    }
    detectAndCompute(image, noArray(), keypoints, descriptors, true);
}
Example No. 12
/*
 * Detect keypoints in an image.
 * image        The image.
 * keypoints    The detected keypoints.
 * mask         Mask specifying where to look for keypoints (optional). Must be a char
 *              matrix with non-zero values in the region of interest.
 */
void Feature2D::detect( InputArray image,
                        std::vector<KeyPoint>& keypoints,
                        InputArray mask )
{
    CV_INSTRUMENT_REGION();

    if( image.empty() )
    {
        keypoints.clear();
        return;
    }
    detectAndCompute(image, mask, keypoints, noArray(), false);
}
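
The two guards above combine into the usual detect-then-compute flow; a sketch with ORB as an arbitrary Feature2D choice and img assumed loaded:

cv::Ptr<cv::Feature2D> orb = cv::ORB::create();
std::vector<cv::KeyPoint> kps;
cv::Mat desc;

orb->detect(img, kps);          // clears kps and returns early if img is empty
orb->compute(img, kps, desc);   // internally calls detectAndCompute with useProvidedKeypoints = true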
Example No. 13
void cv::softcascade::SCascade::detect(InputArray _image, InputArray _rois, OutputArray _objects, cv::gpu::Stream& s) const
{
    CV_Assert(fields);

    // only color images and precomputed integrals are supported
    int type = _image.type();
    CV_Assert(type == CV_8UC3 || type == CV_32SC1 || (!_rois.empty()));

    const cv::gpu::GpuMat image = _image.getGpuMat();

    if (_objects.empty()) _objects.create(1, 4096 * sizeof(Detection), CV_8UC1);

    cv::gpu::GpuMat rois = _rois.getGpuMat(), objects = _objects.getGpuMat();

    /// roi
    Fields& flds = *fields;
    int shr = flds.shrinkage;

    flds.mask.create( rois.cols / shr, rois.rows / shr, rois.type());

    device::shrink(rois, flds.mask);
    //cv::gpu::transpose(flds.genRoiTmp, flds.mask, s);

    if (type == CV_8UC3)
    {
        flds.update(image.rows, image.cols, flds.shrinkage);

        if (flds.check((float)minScale, (float)maxScale, scales))
            flds.createLevels(image.rows, image.cols);

        flds.preprocessor->apply(image, flds.shrunk);
        integral(flds.shrunk, flds.hogluv, flds.integralBuffer, s);
    }
    else
    {
        if (s)
            s.enqueueCopy(image, flds.hogluv);
        else
            image.copyTo(flds.hogluv);
    }

    flds.detect(objects, s);

    if ( (flags & NMS_MASK) != NO_REJECT)
    {
        cv::gpu::GpuMat spr(objects, cv::Rect(0, 0, flds.suppressed.cols, flds.suppressed.rows));
        flds.suppress(objects, s);
        flds.suppressed.copyTo(spr);
    }
}
Example No. 14
static bool ocl_accumulate( InputArray _src, InputArray _src2, InputOutputArray _dst, double alpha,
                            InputArray _mask, int op_type )
{
    CV_Assert(op_type == ACCUMULATE || op_type == ACCUMULATE_SQUARE ||
              op_type == ACCUMULATE_PRODUCT || op_type == ACCUMULATE_WEIGHTED);

    int stype = _src.type(), cn = CV_MAT_CN(stype);
    int sdepth = CV_MAT_DEPTH(stype), ddepth = _dst.depth();

    bool doubleSupport = ocl::Device::getDefault().doubleFPConfig() > 0,
            haveMask = !_mask.empty();

    if (!doubleSupport && (sdepth == CV_64F || ddepth == CV_64F))
        return false;

    const char * const opMap[4] = { "ACCUMULATE", "ACCUMULATE_SQUARE", "ACCUMULATE_PRODUCT",
                                   "ACCUMULATE_WEIGHTED" };

    ocl::Kernel k("accumulate", ocl::imgproc::accumulate_oclsrc,
                  format("-D %s%s -D srcT=%s -D cn=%d -D dstT=%s%s",
                         opMap[op_type], haveMask ? " -D HAVE_MASK" : "",
                         ocl::typeToStr(sdepth), cn, ocl::typeToStr(ddepth),
                         doubleSupport ? " -D DOUBLE_SUPPORT" : ""));
    if (k.empty())
        return false;

    UMat src = _src.getUMat(), src2 = _src2.getUMat(), dst = _dst.getUMat(), mask = _mask.getUMat();

    ocl::KernelArg srcarg = ocl::KernelArg::ReadOnlyNoSize(src),
            src2arg = ocl::KernelArg::ReadOnlyNoSize(src2),
            dstarg = ocl::KernelArg::ReadWrite(dst),
            maskarg = ocl::KernelArg::ReadOnlyNoSize(mask);

    int argidx = k.set(0, srcarg);
    if (op_type == ACCUMULATE_PRODUCT)
        argidx = k.set(argidx, src2arg);
    argidx = k.set(argidx, dstarg);
    if (op_type == ACCUMULATE_WEIGHTED)
    {
        if (ddepth == CV_32F)
            argidx = k.set(argidx, (float)alpha);
        else
            argidx = k.set(argidx, alpha);
    }
    if (haveMask)
        k.set(argidx, maskarg);

    size_t globalsize[2] = { (size_t)src.cols, (size_t)src.rows };
    return k.run(2, globalsize, NULL, false);
}
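
This OpenCL path sits behind the public accumulate family; a sketch of the user-facing ACCUMULATE_WEIGHTED case, with the camera source as an assumption:

#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>

int main()
{
    cv::VideoCapture cap(0);                 // hypothetical camera source
    cv::Mat frame, avg;

    while (cap.read(frame))
    {
        if (avg.empty())
            frame.convertTo(avg, CV_32FC3);  // the accumulator must be floating point
        cv::accumulateWeighted(frame, avg, 0.05); // avg = (1-alpha)*avg + alpha*frame
    }
    return 0;
}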
Example No. 15
File: icp.cpp  Project: hayborl/ctok
void ICP::run(bool withCuda, InputArray initObjSet)
{
	assert(!m_objSet.empty() && !m_modSet.empty());

	double d_pre = 100000, d_now = 100000;
	int iterCnt = 0;
	Mat objSet;
	Transformation tr;

	if (initObjSet.empty())
	{
		objSet = m_objSet.clone();
	}
	else
	{
		objSet = initObjSet.getMat();
	}

/*	plotTwoPoint3DSet(objSet, m_modSet);*/

	do 
	{
		d_pre = d_now;

		Mat closestSet;
		Mat lambda(objSet.rows, 1, CV_64FC1);
		RUNANDTIME(global_timer, closestSet = 
			getClosestPointsSet(objSet, lambda, KDTREE).clone(), 
			OUTPUT && SUBOUTPUT, "compute closest points.");
		Mat tmpObjSet = convertMat(m_objSet);
		Mat tmpModSet = convertMat(closestSet);
		RUNANDTIME(global_timer, tr = 
			computeTransformation(tmpObjSet, tmpModSet, lambda), 
			OUTPUT && SUBOUTPUT, "compute transformation");
		Mat transformMat = tr.toMatrix();
		RUNANDTIME(global_timer, transformPointCloud(
			m_objSet, objSet, transformMat, withCuda), 
			OUTPUT && SUBOUTPUT, "transform points.");
		RUNANDTIME(global_timer, 
			d_now = computeError(objSet, closestSet, lambda, withCuda),
			OUTPUT && SUBOUTPUT, "compute error.");

		iterCnt++;
	} while (fabs(d_pre - d_now) > m_epsilon && iterCnt <= m_iterMax);

	m_tr = tr;
/*	waitKey();*/

/*	plotTwoPoint3DSet(objSet, m_modSet);*/
}
Example No. 16
File: ippe.cpp  Project: lz89/IPPE
void IPPE::PoseSolver::solveSquare(float squareLength, InputArray _imagePoints, InputArray _cameraMatrix, InputArray _distCoeffs,
                                   OutputArray _rvec1, OutputArray _tvec1, float& err1, OutputArray _rvec2, OutputArray _tvec2, float& err2)
{

    //allocate outputs:
    _rvec1.create(3, 1, CV_64FC1);
    _tvec1.create(3, 1, CV_64FC1);
    _rvec2.create(3, 1, CV_64FC1);
    _tvec2.create(3, 1, CV_64FC1);

    cv::Mat normalizedInputPoints; //undistorted version of imagePoints
    cv::Mat objectPoints2D;

    //generate the object points:
    generateSquareObjectCorners2D(squareLength, objectPoints2D);


    cv::Mat H; //homography from canonical object points to normalized pixels


    if (_cameraMatrix.empty()) {
        //this means imagePoints are defined in normalized pixel coordinates, so just copy it:
        _imagePoints.copyTo(normalizedInputPoints);
    }
    else {
        //undistort the image points (i.e. put them in normalized pixel coordinates).
        cv::undistortPoints(_imagePoints, normalizedInputPoints, _cameraMatrix, _distCoeffs);
    }

    //compute H
    homographyFromSquarePoints(normalizedInputPoints, squareLength / 2.0f, H);

    //now solve
    cv::Mat Ma, Mb;
    solveCanonicalForm(objectPoints2D, normalizedInputPoints, H, Ma, Mb);

    //sort poses according to reprojection error:
    cv::Mat M1, M2;
    cv::Mat objectPoints3D;
    generateSquareObjectCorners3D(squareLength, objectPoints3D);
    sortPosesByReprojError(objectPoints3D, _imagePoints, _cameraMatrix, _distCoeffs, Ma, Mb, M1, M2, err1, err2);

    //fill outputs
    rot2vec(M1.colRange(0, 3).rowRange(0, 3), _rvec1);
    rot2vec(M2.colRange(0, 3).rowRange(0, 3), _rvec2);

    M1.colRange(3, 4).rowRange(0, 3).copyTo(_tvec1);
    M2.colRange(3, 4).rowRange(0, 3).copyTo(_tvec2);
}
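
OpenCV later absorbed IPPE into solvePnP; the equivalent planar-square solve through the public API, sketched with the intrinsics K and distCoeffs assumed known and s the square side length:

const float s = 1.0f;                       // square side length (assumption)
std::vector<cv::Point3f> objPts = {         // SOLVEPNP_IPPE_SQUARE requires exactly this order
    {-s/2,  s/2, 0}, { s/2,  s/2, 0}, { s/2, -s/2, 0}, {-s/2, -s/2, 0} };
std::vector<cv::Point2f> imgPts;            // fill with the detected corners, same order

cv::Mat rvec, tvec;
cv::solvePnP(objPts, imgPts, K, distCoeffs, rvec, tvec, false,
             cv::SOLVEPNP_IPPE_SQUARE);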
Example No. 17
void UMat::copyTo(OutputArray _dst, InputArray _mask) const
{
    if( _mask.empty() )
    {
        copyTo(_dst);
        return;
    }
#ifdef HAVE_OPENCL
    int cn = channels(), mtype = _mask.type(), mdepth = CV_MAT_DEPTH(mtype), mcn = CV_MAT_CN(mtype);
    CV_Assert( mdepth == CV_8U && (mcn == 1 || mcn == cn) );

    if (ocl::useOpenCL() && _dst.isUMat() && dims <= 2)
    {
        UMatData * prevu = _dst.getUMat().u;
        _dst.create( dims, size, type() );

        UMat dst = _dst.getUMat();

        bool haveDstUninit = false;
        if( prevu != dst.u ) // do not leave dst uninitialized
            haveDstUninit = true;

        String opts = format("-D COPY_TO_MASK -D T1=%s -D scn=%d -D mcn=%d%s",
                             ocl::memopTypeToStr(depth()), cn, mcn,
                             haveDstUninit ? " -D HAVE_DST_UNINIT" : "");

        ocl::Kernel k("copyToMask", ocl::core::copyset_oclsrc, opts);
        if (!k.empty())
        {
            k.args(ocl::KernelArg::ReadOnlyNoSize(*this),
                   ocl::KernelArg::ReadOnlyNoSize(_mask.getUMat()),
                   haveDstUninit ? ocl::KernelArg::WriteOnly(dst) :
                                   ocl::KernelArg::ReadWrite(dst));

            size_t globalsize[2] = { (size_t)cols, (size_t)rows };
            if (k.run(2, globalsize, NULL, false))
            {
                CV_IMPL_ADD(CV_IMPL_OCL);
                return;
            }
        }
    }
#endif
    Mat src = getMat(ACCESS_READ);
    src.copyTo(_dst, _mask);
}
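
From user code the masked copy is transparent; whether the OpenCL kernel or the Mat fallback runs depends on the device. A sketch, with the input path as an assumption:

cv::UMat src, dst, mask;
cv::imread("in.png").copyTo(src);          // hypothetical path
mask = cv::UMat::zeros(src.size(), CV_8UC1);
cv::circle(mask, cv::Point(mask.cols / 2, mask.rows / 2), 50,
           cv::Scalar(255), cv::FILLED);   // copy only inside the circle
src.copyTo(dst, mask);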
Example No. 18
void AdaptiveManifoldFilterN::initSrcAndJoint(InputArray src_, InputArray joint_)
{
    srcSize = src_.size();
    smallSize = getSmallSize();
    srcCnNum = src_.channels();

    split(src_, srcCn);
    if (src_.depth() != CV_32F)
    {
        for (int i = 0; i < srcCnNum; i++)
            srcCn[i].convertTo(srcCn[i], CV_32F);
    }

    if (joint_.empty() || joint_.getObj() == src_.getObj())
    {
        jointCnNum = srcCnNum;

        if (src_.depth() == CV_32F)
        {
            jointCn = srcCn;
        }
        else
        {
            jointCn.resize(jointCnNum);
            for (int i = 0; i < jointCnNum; i++)
                srcCn[i].convertTo(jointCn[i], CV_32F, getNormalizer(src_.depth()));
        }
    }
    else
    {
        splitChannels(joint_, jointCn);
        
        jointCnNum = (int)jointCn.size();
        int jointDepth = jointCn[0].depth();
        Size jointSize = jointCn[0].size();

        CV_Assert( jointSize == srcSize && (jointDepth == CV_8U || jointDepth == CV_16U || jointDepth == CV_32F) );

        if (jointDepth != CV_32F)
        {
            for (int i = 0; i < jointCnNum; i++)
                jointCn[i].convertTo(jointCn[i], CV_32F, getNormalizer(jointDepth));
        }
    }
}
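
The public entry point for this filter is cv::ximgproc::amFilter; a sketch, with the sigma values and input path as assumptions:

#include <opencv2/ximgproc.hpp>

cv::Mat src = cv::imread("in.png");   // hypothetical path
cv::Mat dst;
// joint == src here; pass a separate guide image for joint filtering
cv::ximgproc::amFilter(src, src, dst, /*sigma_s=*/16.0, /*sigma_r=*/0.2);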
Example No. 19
void filmGrain(InputArray src, OutputArray dst, int grainValue, int seed)
{
    CV_Assert(!src.empty());
    CV_Assert(src.type() == CV_8UC1 || src.type() == CV_8UC3);

    Mat image=src.getMat();
    Mat noise;
    noise.create(image.size(), CV_8UC1);
    RNG rng(seed);
    rng.fill(noise, RNG::UNIFORM, 0, grainValue);
    dst.create(src.size(), src.type());
    Mat dstMat=dst.getMat();
    if(src.type()==CV_8UC3)
    {
        cvtColor(noise, noise, COLOR_GRAY2RGB);
    }
    add(image, noise, dstMat); // write into dst's allocated buffer; '=' would only rebind the local header
}
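
Calling the helper above; the grain strength and seed are arbitrary:

cv::Mat img = cv::imread("photo.png");    // hypothetical path; CV_8UC1 or CV_8UC3
cv::Mat grainy;
filmGrain(img, grainy, /*grainValue=*/32, /*seed=*/42);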
Example No. 20
    void AKAZE::operator()(InputArray image, InputArray mask,
        std::vector<KeyPoint>& keypoints,
        OutputArray descriptors,
        bool useProvidedKeypoints) const
    {
        cv::Mat img = image.getMat();
        if (img.type() != CV_8UC1)
            cvtColor(image, img, COLOR_BGR2GRAY);

        Mat img1_32;
        img.convertTo(img1_32, CV_32F, 1.0 / 255.0, 0);

        cv::Mat& desc = descriptors.getMatRef();

        AKAZEOptions options;
        options.descriptor = static_cast<DESCRIPTOR_TYPE>(descriptor);
        options.descriptor_channels = descriptor_channels;
        options.descriptor_size = descriptor_size;
        options.nsublevels = nsublevels;
        options.dthreshold = dtreshhold;
        options.img_width = img.cols;
        options.img_height = img.rows;

        AKAZEFeatures impl(options);
        impl.Create_Nonlinear_Scale_Space(img1_32);

        if (!useProvidedKeypoints)
        {
            impl.Feature_Detection(keypoints);
        }

        if (!mask.empty())
        {
            cv::KeyPointsFilter::runByPixelsMask(keypoints, mask.getMat());
        }

        impl.Compute_Descriptors(keypoints, desc);

        CV_Assert((!desc.rows || desc.cols == descriptorSize()));
        CV_Assert((!desc.rows || (desc.type() == descriptorType())));
    }
Example No. 21
void UMat::copyTo(OutputArray _dst, InputArray _mask) const
{
    if( _mask.empty() )
    {
        copyTo(_dst);
        return;
    }

    int cn = channels(), mtype = _mask.type(), mdepth = CV_MAT_DEPTH(mtype), mcn = CV_MAT_CN(mtype);
    CV_Assert( mdepth == CV_8U && (mcn == 1 || mcn == cn) );

    if (ocl::useOpenCL() && _dst.isUMat() && dims <= 2)
    {
        UMatData * prevu = _dst.getUMat().u;
        _dst.create( dims, size, type() );

        UMat dst = _dst.getUMat();

        if( prevu != dst.u ) // do not leave dst uninitialized
            dst = Scalar(0);

        ocl::Kernel k("copyToMask", ocl::core::copyset_oclsrc,
                      format("-D COPY_TO_MASK -D T=%s -D scn=%d -D mcn=%d",
                             ocl::memopTypeToStr(depth()), cn, mcn));
        if (!k.empty())
        {
            k.args(ocl::KernelArg::ReadOnlyNoSize(*this), ocl::KernelArg::ReadOnlyNoSize(_mask.getUMat()),
                   ocl::KernelArg::WriteOnly(dst));

            size_t globalsize[2] = { (size_t)cols, (size_t)rows };
            if (k.run(2, globalsize, NULL, false))
                return;
        }
    }

    Mat src = getMat(ACCESS_READ);
    src.copyTo(_dst, _mask);
}
Example No. 22
GpuMat cv::cuda::getInputMat(InputArray _src, Stream& stream)
{
    GpuMat src;

#ifndef HAVE_CUDA
    (void) _src;
    (void) stream;
    throw_no_cuda();
#else
    if (_src.kind() == _InputArray::CUDA_GPU_MAT)
    {
        src = _src.getGpuMat();
    }
    else if (!_src.empty())
    {
        BufferPool pool(stream);
        src = pool.getBuffer(_src.size(), _src.type());
        src.upload(_src, stream);
    }
#endif

    return src;
}
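
getInputMat() is an internal convenience of the CUDA modules; a sketch of the pattern it enables inside a hypothetical wrapper. getOutputMat()/syncOutput() are used under the same assumption, and cv::cuda::subtract requires the cudaarithm module:

// hypothetical wrapper that accepts a Mat or GpuMat input transparently
void myGpuNegate(cv::InputArray _src, cv::OutputArray _dst, cv::cuda::Stream& stream)
{
    cv::cuda::GpuMat src = cv::cuda::getInputMat(_src, stream);   // uploads host data if needed
    cv::cuda::GpuMat dst = cv::cuda::getOutputMat(_dst, src.rows, src.cols,
                                                  src.type(), stream);
    cv::cuda::subtract(cv::Scalar::all(255), src, dst, cv::noArray(), -1, stream);
    cv::cuda::syncOutput(dst, _dst, stream);  // downloads if the caller passed a Mat
}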
Example No. 23
bool CustomPattern::findPattern(InputArray image, OutputArray matched_features, OutputArray pattern_points,
                                const double ratio, const double proj_error, const bool refine_position, OutputArray out,
                                OutputArray H, OutputArray pattern_corners)
{
    CV_Assert(!image.empty() && proj_error > 0);

    Mat img = image.getMat();
    vector<Point2f> m_ftrs;
    vector<Point3f> pattern_pts;
    Mat _H;
    vector<Point2f> scene_corners;
    if (!findPatternPass(img, m_ftrs, pattern_pts, _H, scene_corners, 0.6, proj_error, refine_position))
        return false; // pattern not found

    Mat mask = Mat::zeros(img.size(), CV_8UC1);
    vector<vector<Point> > obj(1);
    vector<Point> scorners_int(scene_corners.size());
    for (uint i = 0; i < scene_corners.size(); ++i)
        scorners_int[i] = (Point)scene_corners[i]; // for drawContours
    obj[0] = scorners_int;
    drawContours(mask, obj, 0, Scalar(255), FILLED);

    // Second pass
    Mat output;
    if (!findPatternPass(img, m_ftrs, pattern_pts, _H, scene_corners,
                         ratio, proj_error, refine_position, mask, output))
        return false; // pattern not found

    Mat(m_ftrs).copyTo(matched_features);
    Mat(pattern_pts).copyTo(pattern_points);
    if (out.needed()) output.copyTo(out);
    if (H.needed()) _H.copyTo(H);
    if (pattern_corners.needed()) Mat(scene_corners).copyTo(pattern_corners);

    return (!m_ftrs.empty());
}
Example No. 24
    void KAZE::operator()(InputArray _image, InputArray _mask, vector<KeyPoint>& _keypoints,
        OutputArray _descriptors, bool useProvidedKeypoints) const
    {

        bool do_keypoints = !useProvidedKeypoints;
        bool do_descriptors = _descriptors.needed();

        if( (!do_keypoints && !do_descriptors) || _image.empty() )
            return;
        
        cv::Mat img1_8, img1_32;

        // Convert to grayscale and to a float image
        if (_image.getMat().channels() == 3)
            cv::cvtColor(_image, img1_8, CV_RGB2GRAY);
        else
            _image.getMat().copyTo(img1_8);

        img1_8.convertTo(img1_32, CV_32F, 1.0/255.0,0);

        // Construct KAZE
        toptions opt = options;
        opt.img_width = img1_32.cols;
        opt.img_height = img1_32.rows;

        ::KAZE kazeEvolution(opt);

        // Create nonlinear scale space
        kazeEvolution.Create_Nonlinear_Scale_Space(img1_32);        

        // Feature detection
        std::vector<Ipoint> kazePoints;

        if (do_keypoints)
        {
            kazeEvolution.Feature_Detection(kazePoints);
            filterDuplicated(kazePoints);

            if (!_mask.empty())
            {
                filterByPixelsMask(kazePoints, _mask.getMat());
            }

            if (opt.nfeatures > 0)
            {
                filterRetainBest(kazePoints, opt.nfeatures);
            }
            
        }
        else
        {
            kazePoints.resize(_keypoints.size());

            #pragma omp parallel for
            for (int i = 0; i < (int)kazePoints.size(); i++)
            {
                convertPoint(_keypoints[i], kazePoints[i]);    
            }
        }
        
        // Descriptor calculation
        if (do_descriptors)
        {
            kazeEvolution.Feature_Description(kazePoints);

            cv::Mat& descriptors = _descriptors.getMatRef();
            descriptors.create((int)kazePoints.size(), descriptorSize(), descriptorType());

            for (size_t i = 0; i < kazePoints.size(); i++)
            {
                std::copy(kazePoints[i].descriptor.begin(), kazePoints[i].descriptor.end(), (float*)descriptors.row(i).data);
            }
        }

        // Transfer from KAZE::Ipoint to cv::KeyPoint
        if (do_keypoints)
        {
            _keypoints.resize(kazePoints.size());

            #pragma omp parallel for
            for (int i = 0; i < (int)kazePoints.size(); i++)
            {
                convertPoint(kazePoints[i], _keypoints[i]);
            }
        }
        
    }
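
The modern equivalent of this 2.4-era wrapper is the Feature2D subclass; a sketch, with img assumed loaded:

cv::Ptr<cv::KAZE> kaze = cv::KAZE::create();
std::vector<cv::KeyPoint> kps;
cv::Mat desc;
kaze->detectAndCompute(img, cv::noArray(), kps, desc);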
Example No. 25
void cv::ogl::render(const ogl::Arrays& arr, InputArray indices, int mode, Scalar color)
{
#ifndef HAVE_OPENGL
    (void) arr;
    (void) indices;
    (void) mode;
    (void) color;
    throw_no_ogl();
#else
    if (!arr.empty() && !indices.empty())
    {
        gl::Color3d(color[0] / 255.0, color[1] / 255.0, color[2] / 255.0);

        arr.bind();

        const int kind = indices.kind();

        switch (kind)
        {
        case _InputArray::OPENGL_BUFFER :
            {
                ogl::Buffer buf = indices.getOGlBuffer();

                const int depth = buf.depth();

                CV_Assert( buf.channels() == 1 );
                CV_Assert( depth <= CV_32S );

                GLenum type;
                if (depth < CV_16U)
                    type = gl::UNSIGNED_BYTE;
                else if (depth < CV_32S)
                    type = gl::UNSIGNED_SHORT;
                else
                    type = gl::UNSIGNED_INT;

                buf.bind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                gl::DrawElements(mode, buf.size().area(), type, 0);

                ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                break;
            }

        default:
            {
                Mat mat = indices.getMat();

                const int depth = mat.depth();

                CV_Assert( mat.channels() == 1 );
                CV_Assert( depth <= CV_32S );
                CV_Assert( mat.isContinuous() );

                GLenum type;
                if (depth < CV_16U)
                    type = gl::UNSIGNED_BYTE;
                else if (depth < CV_32S)
                    type = gl::UNSIGNED_SHORT;
                else
                    type = gl::UNSIGNED_INT;

                ogl::Buffer::unbind(ogl::Buffer::ELEMENT_ARRAY_BUFFER);

                gl::DrawElements(mode, mat.size().area(), type, mat.data);
            }
        }
    }
#endif
}
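
render() needs a current OpenGL context, typically inside the draw callback of a WINDOW_OPENGL window; a sketch, with the vertex and index data as assumptions:

cv::ogl::Arrays arr;   // filled once, e.g. via arr.setVertexArray(vertices)
cv::Mat indices;       // CV_32SC1 element indices into the vertex array

static void onDraw(void*)
{
    cv::ogl::render(arr, indices, cv::ogl::TRIANGLES, cv::Scalar(0, 255, 0));
}

// setup:
//   cv::namedWindow("gl", cv::WINDOW_OPENGL);
//   cv::setOpenGlDrawCallback("gl", onDraw);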