void FastBGSubtract() //Silhouette_Final에 바로 실루엣 데이터를 써넣는다.
{
	static UMat CL_Background_MOG;

	static Ptr<BackgroundSubtractor> pMOG2; //MOG2 Background subtractor  

	if (pMOG2 == NULL)
		pMOG2 = createBackgroundSubtractorMOG2(200, 16.0, true);

	pMOG2->apply(CL_Current_Frame, CL_Background_MOG);

	CL_Background_MOG.copyTo(Silhouette_Final);



	for (int x = 0; x < Cols; x++)
	{
		for (int y = 0; y < Rows; y++)
		{
			if (Silhouette_Final.data[y*Cols + x] == 127)
				Silhouette_Final.data[y*Cols + x] = 0;
		}
	}
	


}
Example #2
0
// Draw both pure-C++ and ocl square results onto a single image.
// Returns a copy of `image` with the squares in `sqs` drawn on it; the
// input image itself is left untouched.
static UMat drawSquaresBoth( const UMat& image,
                            const vector<vector<Point> >& sqs)
{
    // clone() allocates and deep-copies in one step; the original
    // pre-allocated a same-sized UMat and then copied into it.
    UMat imgToShow = image.clone();

    drawSquares(imgToShow, sqs);   // overlay detected squares in place

    return imgToShow;
}
Example #3
0
// Builds a square matrix whose main diagonal is the vector d (all other
// elements zero). d must be a single row or a single column.
UMat UMat::diag(const UMat& d)
{
    CV_Assert( d.cols == 1 || d.rows == 1 );

    // One of rows/cols is 1, so this is just the vector's length.
    const int n = d.rows + d.cols - 1;

    // n x n zero matrix plus a view of its main diagonal; writing through
    // the view fills the result in place.
    UMat result(n, n, d.type(), Scalar(0));
    UMat diagView = result.diag();

    if( d.cols != 1 )
        transpose(d, diagView);   // row vector: transpose into the column view
    else
        d.copyTo(diagView);       // column vector: copy directly

    return result;
}
Example #4
0
    // Stores the current frame into the ring buffer and updates the
    // forward/backward optical-flow fields between the previous and current
    // frames. NOTE(review): the frameSource parameter is ignored here —
    // presumably ucurFrame_ is filled by the caller before this runs; confirm.
    bool BTVL1::ocl_readNextFrame(Ptr<FrameSource>& /*frameSource*/)
    {
        // Store the current frame (converted to CV_32F) at the current slot.
        ucurFrame_.convertTo(at(storePos_, uframes_), CV_32F);

        if (storePos_ > 0)
        {
            // prev -> cur flow, stored with the previous slot's index...
            opticalFlow_->calc(uprevFrame_, ucurFrame_, at(storePos_ - 1, uforwardMotions_));
            // ...and cur -> prev flow, stored with the current slot's index.
            opticalFlow_->calc(ucurFrame_, uprevFrame_, at(storePos_, ubackwardMotions_));
        }

        // The current frame becomes "previous" for the next call.
        ucurFrame_.copyTo(uprevFrame_);
        return true;
    }
Example #5
0
// Tiles `src` ny times vertically and nx times horizontally into `dst`
// (OpenCL path of repeat). dst is assumed to be preallocated by the caller
// to ny*src.rows x nx*src.cols of src's type.
static bool ocl_repeat(InputArray _src, int ny, int nx, OutputArray _dst)
{
    UMat src = _src.getUMat();
    UMat dst = _dst.getUMat();

    for (int row = 0; row < ny; ++row)
    {
        for (int col = 0; col < nx; ++col)
        {
            // View of the destination tile at (row, col); copying into the
            // view writes straight through to dst.
            Rect tileRect(col * src.cols, row * src.rows, src.cols, src.rows);
            UMat tile(dst, tileRect);
            src.copyTo(tile);
        }
    }
    return true;
}
// Runs the HOG pedestrian detector on `frame`, returning detections in
// `found`. Optionally converts to grayscale and downscales the input first.
void PedestrianDetector::detect(UMat frame, std::vector<Rect> &found){
	UMat img_aux, img;

	// Change format of the image: grayscale when requested, else a copy.
	if (make_gray_) cvtColor(frame, img_aux, COLOR_BGR2GRAY);
	else frame.copyTo(img_aux);

	// Resize the image unless the resize factor is (effectively) 1.
	// BUG FIX: the guard previously tested scale_ — the HOG pyramid scale
	// factor, which is essentially never 1.0 — while the resize itself used
	// resize_scale_, so the resize branch was effectively always taken.
	// Both now consistently use resize_scale_.
	if (abs(resize_scale_ - 1.0) > 0.001)
	{
		Size sz((int)((double)img_aux.cols / resize_scale_), (int)((double)img_aux.rows / resize_scale_));
		resize(img_aux, img, sz);
	}
	else { img = img_aux; }

	hog_.nlevels = nlevels_;

	// hog performance: padding must stay Size(0,0) — with non-zero padding
	// the OpenCL path is not used (see the commented variant below).
	hog_.detectMultiScale(img, found, hit_threshold_, win_stride_, Size(0, 0), scale_, gr_threshold_);
	//hog_.detectMultiScale(img, found, hit_threshold_, win_stride_, Size(32, 32), scale_, gr_threshold_);  // If padding is not Size(0,0), OpenCL is not used..
}
// Computes dense optical flow from uprevgray to ugray — DualTVL1 when
// algo == 1, Farneback otherwise — downloads the result to the host and
// renders it to flowfilename.
void FlowSourceOpenCV_sV::buildFlowOpenCV_3(cv::UMat& uprevgray, cv::UMat& ugray, std::string flowfilename)
{
    dumpAlgosParams();
    qDebug() << "Have OpenCL: " << cv::ocl::haveOpenCL() << " useOpenCL:" << cv::ocl::useOpenCL();

    UMat uflow;
    if (algo == 1) { // DualTVL1
        // Configure the TV-L1 solver from the member parameters.
        cv::Ptr<cv::DualTVL1OpticalFlow> tvl1 = cv::createOptFlow_DualTVL1();
        tvl1->setLambda(lambda);
        tvl1->setTau(tau);
        tvl1->setScalesNumber(nscales);
        tvl1->setWarpingsNumber(warps);
        tvl1->setOuterIterations(iterations);
        tvl1->setEpsilon(epsilon);
        tvl1->calc(uprevgray, ugray, uflow);
    } else { // _FARN_
        // Farneback: all tuning parameters are passed straight through.
        calcOpticalFlowFarneback(uprevgray, ugray, uflow,
                                 pyrScale,   // 0.5
                                 numLevels,  // 3
                                 winSize,    // 15
                                 numIters,   // 8
                                 polyN,      // 5
                                 polySigma,  // 1.2
                                 flags);     // 0
    }

    // Bring the flow field back to a host Mat for drawing.
    Mat flow;
    uflow.copyTo(flow);
    qDebug() << "finished";
    drawOptFlowMap(flow, flowfilename);
}
Example #8
0
    // OpenCL forward pass: each output blob receives the corresponding input
    // blob's data reinterpreted with the output's shape (a reshape/identity
    // style layer — element counts are assumed to match).
    bool forward_ocl(InputArrayOfArrays inps, OutputArrayOfArrays outs, OutputArrayOfArrays internals)
    {
        std::vector<UMat> inputs;
        std::vector<UMat> outputs;

        inps.getUMatVector(inputs);
        outs.getUMatVector(outputs);

        for (size_t i = 0; i < inputs.size(); i++)
        {
            UMat srcBlob = inputs[i];
            // Compare the underlying device buffers; if input and output
            // already share storage, no copy is needed.
            void *src_handle = inputs[i].handle(ACCESS_READ);
            void *dst_handle = outputs[i].handle(ACCESS_WRITE);
            if (src_handle != dst_handle)
            {
                // Re-header the source with the destination's shape, then
                // copy device-to-device into the output.
                MatShape outShape = shape(outputs[i]);
                UMat umat = srcBlob.reshape(1, (int)outShape.size(), &outShape[0]);
                umat.copyTo(outputs[i]);
            }
        }
        outs.assign(outputs);

        return true;
    }
Example #9
0
 // Downloads the contents of a CV_32FC2 UMat into a vector of Point2f,
 // resizing the vector to hold one Point2f per matrix element.
 void UMatToVector(const UMat & um, std::vector<Point2f> & v) const
 {
     v.resize(um.size().area());
     // BUG FIX: taking &v[0] on an empty vector is undefined behavior;
     // bail out when there is nothing to copy.
     if (v.empty())
         return;
     // Wrap the vector's storage in a Mat header so copyTo writes straight
     // into it with no extra allocation.
     um.copyTo(Mat(um.size(), CV_32FC2, v.data()));
 }
Example #10
0
// Computes SURF descriptors on the OpenCL device for the given keypoints
// (one keypoint per column). Returns false if either kernel fails to run.
bool SURF_OCL::computeDescriptors(const UMat &keypoints, OutputArray _descriptors)
{
    int dsize = params->descriptorSize();   // 64 or 128 floats per descriptor
    int nFeatures = keypoints.cols;
    if (nFeatures == 0)
    {
        _descriptors.release();
        return true;
    }
    _descriptors.create(nFeatures, dsize, CV_32F);
    // Compute into a UMat; when the caller did not pass a UMat, use a
    // temporary device buffer and copy out at the end.
    UMat descriptors;
    if( _descriptors.isUMat() )
        descriptors = _descriptors.getUMat();
    else
        descriptors.create(nFeatures, dsize, CV_32F);

    ocl::Kernel kerCalcDesc, kerNormDesc;

    if( dsize == 64 )
    {
        kerCalcDesc.create("SURF_computeDescriptors64", ocl::xfeatures2d::surf_oclsrc, kerOpts);
        kerNormDesc.create("SURF_normalizeDescriptors64", ocl::xfeatures2d::surf_oclsrc, kerOpts);
    }
    else
    {
        CV_Assert(dsize == 128);
        kerCalcDesc.create("SURF_computeDescriptors128", ocl::xfeatures2d::surf_oclsrc, kerOpts);
        kerNormDesc.create("SURF_normalizeDescriptors128", ocl::xfeatures2d::surf_oclsrc, kerOpts);
    }

    // Descriptor pass: one 6x6 work-group per feature.
    size_t localThreads[] = {6, 6};
    size_t globalThreads[] = {nFeatures*localThreads[0], localThreads[1]};

    if(haveImageSupport)
    {
        kerCalcDesc.args(imgTex,
                         img_rows, img_cols,
                         ocl::KernelArg::ReadOnlyNoSize(keypoints),
                         ocl::KernelArg::WriteOnlyNoSize(descriptors));
    }
    else
    {
        kerCalcDesc.args(ocl::KernelArg::ReadOnlyNoSize(img),
                         img_rows, img_cols,
                         ocl::KernelArg::ReadOnlyNoSize(keypoints),
                         ocl::KernelArg::WriteOnlyNoSize(descriptors));
    }

    if(!kerCalcDesc.run(2, globalThreads, localThreads, true))
        return false;

    // Normalization pass: one dsize-wide work-group per feature.
    // (Cast avoids a narrowing int -> size_t list-initialization.)
    size_t localThreads_n[] = {(size_t)dsize, 1};
    size_t globalThreads_n[] = {nFeatures*localThreads_n[0], localThreads_n[1]};

    // Removed two dead stores that reassigned globalThreads here: the
    // normalization kernel runs with globalThreads_n / localThreads_n, so
    // those assignments were never read.
    bool ok = kerNormDesc.args(ocl::KernelArg::ReadWriteNoSize(descriptors)).
                        run(2, globalThreads_n, localThreads_n, true);
    if(ok && !_descriptors.isUMat())
        descriptors.copyTo(_descriptors);   // copy back to the caller's Mat
    return ok;
}
// Runs the cascade detector on a scaled-down copy of img, runs it again on
// the mirrored image to catch faces the first pass misses, and draws the
// results onto canvas: circles for roughly square detections, rectangles
// otherwise.
void detectAndDraw( UMat& img, Mat& canvas, CascadeClassifier& cascade, double scale0)
{
// Project profiling hook: the body runs only while the tracker object
// converts to true. NOTE(review): ProfileTracker semantics are defined
// elsewhere in the project — confirm.
if( ProfileTracker x = ProfileTrackParams(184, 0)) {
    int i = 0;                  // index into colors[] while drawing
    // t: tick-count timer; scale stays 1 because drawing happens in
    // smallImg coordinates (canvas receives smallImg below).
    double t = 0, scale=1;
    vector<Rect> faces, faces2;
    // Color palette cycled per detection (i % 8).
    const static Scalar colors[] =
    {
        Scalar(0,0,255),
        Scalar(0,128,255),
        Scalar(0,255,255),
        Scalar(0,255,0),
        Scalar(255,128,0),
        Scalar(255,255,0),
        Scalar(255,0,0),
        Scalar(255,0,255)
    };
    // static: buffers reused across calls to avoid reallocation.
    static UMat gray, smallImg;

    t = (double)getTickCount();

    // Downscale, convert to grayscale and equalize for the detector.
    resize( img, smallImg, Size(), scale0, scale0, INTER_LINEAR );
    cvtColor( smallImg, gray, COLOR_BGR2GRAY );
    equalizeHist( gray, gray );

                // First pass on the upright image.
                cascade.detectMultiScale( gray, faces,
                    1.1, 3, 0
                    //|CASCADE_FIND_BIGGEST_OBJECT
                    //|CASCADE_DO_ROUGH_SEARCH
                    |CASCADE_SCALE_IMAGE,
                    Size(30, 30) );

                // Second pass on the horizontally flipped image.
                flip(gray, gray, 1);
                cascade.detectMultiScale( gray, faces2,
                                         1.1, 2, 0
                                         //|CASCADE_FIND_BIGGEST_OBJECT
                                         //|CASCADE_DO_ROUGH_SEARCH
                                         |CASCADE_SCALE_IMAGE
                                         ,
                                         Size(30, 30) );

    // Mirror the flipped-pass rectangles back into upright coordinates and
    // merge them into the main list.
    for( vector<Rect>::const_iterator r = faces2.begin(); r !=faces2.end(); r++ )    
        faces.push_back(Rect(smallImg.cols - r->x - r->width, r->y,r->width, r->height));

    // Elapsed ticks for the detection (measured but not reported here).
    t = (double)getTickCount() - t;
    smallImg.copyTo(canvas);

    for( vector<Rect>::const_iterator r = faces.begin(); r !=faces.end(); r++, i++ ) {
        Point center;
        Scalar color = colors[i%8];
        int radius;

        // Near-square detections are drawn as circles, others as rectangles.
        double aspect_ratio = (double)r->width/r->height;
        if( 0.75 < aspect_ratio && aspect_ratio < 1.3 )
        {
            center.x = cvRound((r->x + r->width*0.5)*scale);
            center.y = cvRound((r->y + r->height*0.5)*scale);
            radius = cvRound((r->width + r->height)*0.25*scale);
            circle( canvas, center, radius, color, 3, 8, 0 );
        }
        else
            rectangle( canvas, Point(cvRound(r->x*scale), cvRound(r->y*scale)),
                       Point(cvRound((r->x + r->width-1)*scale),cvRound((r->y + r->height-1)*scale)),
                       color, 3, 8, 0);

    }
}
}