Example #1
static void processFrameGPU(cv::gpu::GpuMat &m)
{
	static cv::gpu::GpuMat aux;   // persistent scratch buffers to avoid
	static cv::gpu::GpuMat aux2;  // reallocating device memory every frame
	static cv::gpu::GpuMat aux3;

	cv::gpu::cvtColor(m, aux, CV_BGR2GRAY);               // BGR -> grayscale
	cv::gpu::GaussianBlur(aux, aux2, cv::Size(7,7), 1.5); // denoise before edge detection
	cv::gpu::Canny(aux2, aux3, 2, 15);                    // hysteresis thresholds 2/15
	aux3.copyTo(m);                                       // return the edge map in place
}
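A minimal sketch of how processFrameGPU might be driven from a capture loop (the camera index and window name are assumptions): each frame is uploaded to the device, processed in place, and downloaded for display.
#include <opencv2/opencv.hpp>
#include <opencv2/gpu/gpu.hpp>

int main()
{
	cv::VideoCapture cap(0);             // assumed camera index
	cv::Mat frame, result;
	cv::gpu::GpuMat d_frame;

	while (cap.read(frame))
	{
		d_frame.upload(frame);           // host -> device
		processFrameGPU(d_frame);        // in place: the frame becomes its edge map
		d_frame.download(result);        // device -> host

		cv::imshow("edges", result);
		if (cv::waitKey(1) == 27) break; // Esc quits
	}
	return 0;
}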
Example #2
void cv::softcascade::SCascade::detect(InputArray _image, InputArray _rois, OutputArray _objects, cv::gpu::Stream& s) const
{
    CV_Assert(fields);

    // only color images and precomputed integrals are supported
    int type = _image.type();
    CV_Assert(type == CV_8UC3 || type == CV_32SC1 || (!_rois.empty()));

    const cv::gpu::GpuMat image = _image.getGpuMat();

    if (_objects.empty()) _objects.create(1, 4096 * sizeof(Detection), CV_8UC1);

    cv::gpu::GpuMat rois = _rois.getGpuMat(), objects = _objects.getGpuMat();

    /// roi
    Fields& flds = *fields;
    int shr = flds.shrinkage;

    flds.mask.create( rois.cols / shr, rois.rows / shr, rois.type());

    device::shrink(rois, flds.mask);
    //cv::gpu::transpose(flds.genRoiTmp, flds.mask, s);

    if (type == CV_8UC3)
    {
        flds.update(image.rows, image.cols, flds.shrinkage);

        if (flds.check((float)minScale, (float)maxScale, scales))
            flds.createLevels(image.rows, image.cols);

        flds.preprocessor->apply(image, flds.shrunk);
        integral(flds.shrunk, flds.hogluv, flds.integralBuffer, s);
    }
    else
    {
        if (s)
            s.enqueueCopy(image, flds.hogluv);
        else
            image.copyTo(flds.hogluv);
    }

    flds.detect(objects, s);

    if ( (flags & NMS_MASK) != NO_REJECT)
    {
        cv::gpu::GpuMat spr(objects, cv::Rect(0, 0, flds.suppressed.cols, flds.suppressed.rows));
        flds.suppress(objects, s);
        flds.suppressed.copyTo(spr);
    }
}
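A hedged, fragment-style sketch of a call site for this detector (the model file name and the ROI convention of a non-zero CV_8UC1 mask are assumptions):
void runCascade(const cv::Mat& image)
{
	cv::softcascade::SCascade cascade;
	cv::FileStorage fs("soft-cascade.xml", cv::FileStorage::READ); // assumed model file
	if (!fs.isOpened() || !cascade.load(fs.getFirstTopLevelNode()))
		return;

	cv::gpu::GpuMat d_image(image);                                  // CV_8UC3 frame
	cv::gpu::GpuMat rois(image.size(), CV_8UC1, cv::Scalar::all(1)); // search the whole frame
	cv::gpu::GpuMat objects;
	cv::gpu::Stream stream;

	cascade.detect(d_image, rois, objects, stream);
	stream.waitForCompletion();
}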
Example #3
void LukasKanadeOpticalFlow::drawMotionField_GPU(cv::gpu::GpuMat &imgU, cv::gpu::GpuMat &imgV, cv::gpu::GpuMat &imgMotion,
					 int xSpace, int ySpace, float minCutoff, float maxCutoff, float multiplier, CvScalar color)
{
	cv::Mat uMat( imgU );  // download horizontal flow to host
	cv::Mat vMat( imgV );  // download vertical flow to host
	cv::Mat drawMat( imgU.size(), CV_8UC3, cv::Scalar::all(0) ); // zero-initialized canvas
	int x = 0, y = 0;
	float *ptri;
	float deltaX = 0.0, deltaY = 0.0, angle = 0.0, hyp = 0.0;
	cv::Point p0, p1;

	for( y = ySpace; y < uMat.rows; y += ySpace )
	{
		for(x = xSpace; x < uMat.cols; x += xSpace )
		{
			p0.x = x;
			p0.y = y;

			ptri = uMat.ptr<float>(y);
			deltaX = ptri[x];

			ptri = vMat.ptr<float>(y);
			deltaY = ptri[x];

			angle = atan2(deltaY, deltaX);
			hyp = sqrt(deltaX*deltaX + deltaY*deltaY);

			if( hyp > minCutoff && hyp < maxCutoff )
			{
				p1.x = p0.x + cvRound(multiplier*hyp*cos(angle));
				p1.y = p0.y + cvRound(multiplier*hyp*sin(angle));

				cv::line(drawMat,p0,p1,color,1,CV_AA,0);

				/*
				p0.x = p1.x + cvRound(2*cos(angle-M_PI + M_PI/4));
				p0.y = p1.y + cvRound(2*sin(angle-M_PI + M_PI/4));
				cv::line( imgMotion, p0, p1, color,1, CV_AA, 0);

				p0.x = p1.x + cvRound(2*cos(angle-M_PI - M_PI/4));
				p0.y = p1.y + cvRound(2*sin(angle-M_PI - M_PI/4));
				cv::line( imgMotion, p0, p1, color,1, CV_AA, 0);
				*/
			}
		}
	}

	imgMotion.upload( drawMat );
}
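A hypothetical call site, assuming an instance lk of this class and dense CV_32FC1 flow fields produced by a prior optical-flow step:
cv::gpu::GpuMat d_u, d_v, d_motion; // d_u/d_v filled by the optical-flow computation
lk.drawMotionField_GPU(d_u, d_v, d_motion,
                       10, 10,      // sample the field every 10 px in x and y
                       0.5f, 50.0f, // skip vectors shorter than 0.5 or longer than 50 px
                       1.0f,        // length multiplier
                       CV_RGB(0, 255, 0));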
Example #4
void sg::uncropFFTGPU( cv::gpu::GpuMat const & input, cv::gpu::GpuMat & output, std::vector<cv::gpu::GpuMat> & splitBuffer )
{
	cv::Size originalSize = input.size(); // padded FFT size
	cv::Size oldSize = output.size();     // size to crop back down to

	if( input.channels() > 1 )
	{
		cv::gpu::split( input, splitBuffer );
		splitBuffer[0]( cv::Rect( 0, 0, oldSize.width, oldSize.height ) ).copyTo( output );
	}
	else
		input( cv::Rect( 0, 0, oldSize.width, oldSize.height ) ).copyTo( output );

	// undo the scaling introduced by the unnormalized DFT round trip
	cv::gpu::multiply( output, ( 1.0 / ( originalSize.width * originalSize.height ) ), output );
}
Example #5
void MotionSubtraction::subtractGlobalMotion(cv::gpu::GpuMat &in__flowVector3DAngle, cv::gpu::GpuMat &in__flowVector3DMagnitude, cv::gpu::GpuMat &in__globalMotionX, cv::gpu::GpuMat &in__globalMotionY, cv::gpu::GpuMat &out__subtractedAngle, cv::gpu::GpuMat &out__subtractedMagnitude) {
    const int64 start = cv::getTickCount();

    //----------------------------------------------------------------------------------------
    // convert from magnitude/angle to x/y
    //----------------------------------------------------------------------------------------
    cv::gpu::GpuMat subtractedX(in__flowVector3DAngle.size(), CV_32FC1, cv::Scalar(0.0f));
    cv::gpu::GpuMat subtractedY(in__flowVector3DAngle.size(), CV_32FC1, cv::Scalar(0.0f));
    cv::gpu::GpuMat flowVector3DX, flowVector3DY;
    cv::gpu::polarToCart(in__flowVector3DMagnitude, in__flowVector3DAngle, flowVector3DX, flowVector3DY, true);

    //---------------------------------------------------------------------------------------------------------------------------------
    // segmentation
    //---------------------------------------------------------------------------------------------------------------------------------
    kernel.subtractMotion(flowVector3DX, flowVector3DY, in__globalMotionX, in__globalMotionY, subtractedX, subtractedY);

    //----------------------------------------------------------------------------------------
    // convert from x/y to magnitude/angle
    //----------------------------------------------------------------------------------------
    cv::gpu::cartToPolar(subtractedX, subtractedY, out__subtractedMagnitude, out__subtractedAngle, true);

    //---------------------------------------------------------------------------------------------------------------------------------
    // display computation time
    //---------------------------------------------------------------------------------------------------------------------------------
    const double timeSec = (cv::getTickCount() - start) / cv::getTickFrequency();
    std::cout << "Motion subtr : \t" << timeSec << " sec" << std::endl;

}
Example #6
void buildPyramid(const cv::gpu::GpuMat& cvgmGray_){
	cvgmGray_.copyTo(*_acvgmShrPtrPyrBWs[0]);
	// each level is half the resolution of the previous one (4 levels total)
	for (int n = 0; n < 3; n++){
		cv::gpu::resize(*_acvgmShrPtrPyrBWs[n], *_acvgmShrPtrPyrBWs[n+1], cv::Size(0,0), .5f, .5f);
	}
	return;
}
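A hedged setup sketch for the buffers this function assumes: _acvgmShrPtrPyrBWs is taken to be an array of four shared pointers to pre-constructed GpuMats (the exact pointer type is an assumption).
#include <boost/shared_ptr.hpp>
#include <opencv2/gpu/gpu.hpp>

boost::shared_ptr<cv::gpu::GpuMat> _acvgmShrPtrPyrBWs[4];

void initPyramid()
{
	// the GpuMats start empty; copyTo/resize above allocate their storage
	for (int n = 0; n < 4; ++n)
		_acvgmShrPtrPyrBWs[n].reset(new cv::gpu::GpuMat);
}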
Example #7
void CFast::operator ()(const cv::gpu::GpuMat& cvgmImage_, const cv::gpu::GpuMat& cvgmMask_, std::vector<cv::KeyPoint>* pvKeyPoints_)
{
	if (cvgmImage_.empty())
		return;

	(*this)(cvgmImage_, cvgmMask_, &_cvgmdKeyPoints); // detect into the device keypoint buffer
	downloadKeypoints(_cvgmdKeyPoints, &*pvKeyPoints_); // then copy the results back to host
}
Example #8
//! find keypoints and compute their response if _bNonMaxSupression is true
//! returns the number of detected keypoints (also stored in _uCount);
//! key point locations are stored in _cvgmKeyPointLocation and
//! the corner strengths in _cvgmScore
int CFast::calcKeyPointsLocation(const cv::gpu::GpuMat& cvgmImg_, const cv::gpu::GpuMat& cvgmMask_)
{
	//using namespace cv::gpu::device::fast;

	CV_Assert(cvgmImg_.type() == CV_8UC1);
	CV_Assert(cvgmMask_.empty() || (cvgmMask_.type() == CV_8UC1 && cvgmMask_.size() == cvgmImg_.size()));

	if (!cv::gpu::TargetArchs::builtWith(cv::gpu::GLOBAL_ATOMICS) || !cv::gpu::DeviceInfo().supports(cv::gpu::GLOBAL_ATOMICS))
		CV_Error(CV_StsNotImplemented, "The device doesn't support global atomics");

	unsigned int uMaxKeypoints = static_cast<unsigned int>(_dKeyPointsRatio * cvgmImg_.size().area());

	ensureSizeIsEnough(1, uMaxKeypoints, CV_16SC2, _cvgmKeyPointLocation);

	if (_bNonMaxSupression)
	{
		ensureSizeIsEnough(cvgmImg_.size(), CV_32SC1, _cvgmScore);
		_cvgmScore.setTo(cv::Scalar::all(0));
	}

	_uCount = btl::device::fast::cudaCalcKeypoints(cvgmImg_, cvgmMask_, uMaxKeypoints, _nThreshold, _cvgmKeyPointLocation.ptr<short2>(), _bNonMaxSupression ? &_cvgmScore : NULL);
	_uCount = std::min(_uCount, uMaxKeypoints);

	return _uCount;
}
Example #9
void Tester::testDepth(cv::gpu::GpuMat &testDepth, cv::gpu::GpuMat &groundTruthMap, float &differenceValue) {

    // download depth maps to the host
    cv::Mat depth, groundTruth;
    testDepth.download(depth);
    groundTruthMap.download(groundTruth);

    // accumulate the sum of squared differences over all pixels
    float sum = 0.0f;
    for (int y = 0; y < depth.rows; y++) {
        for (int x = 0; x < depth.cols; x++) {
            const float diff = groundTruth.at<float>(y, x) - depth.at<float>(y, x);
            sum += diff * diff;
        }
    }

    differenceValue = sum;
    file << "Depth error: " << differenceValue << "\n";
}
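The same sum of squared differences can be kept on the device with stock cv::gpu primitives, avoiding the two downloads; a sketch (the helper name is ours):
float sqDiffGPU(const cv::gpu::GpuMat& a, const cv::gpu::GpuMat& b)
{
	cv::gpu::GpuMat diff, sq;
	cv::gpu::subtract(a, b, diff);     // per-pixel difference
	cv::gpu::multiply(diff, diff, sq); // element-wise square
	return static_cast<float>(cv::gpu::sum(sq)[0]);
}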
Example #10
		void meanStdDev(const cv::gpu::GpuMat& mtx, cv::Scalar& mean, cv::Scalar& stddev){
			assert(1 == mtx.channels());

			size_t elem_num = mtx.rows * mtx.cols;
			cv::gpu::GpuMat buf;
			mean = cv::gpu::sum( mtx, buf );      // per-channel sum
			stddev = cv::gpu::sqrSum( mtx, buf ); // per-channel sum of squares

			// single-channel input, so only element 0 is meaningful
			mean[0] /= elem_num;
			stddev[0] /= elem_num;
			stddev[0] = std::sqrt(stddev[0] - mean[0] * mean[0]); // Var(x) = E[x^2] - E[x]^2
		}
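A quick sanity-check sketch against the CPU reference (matrix size and fill value are arbitrary): a constant image should give a mean of 3 and a standard deviation of 0.
cv::Mat h_data = cv::Mat::ones(64, 64, CV_32FC1) * 3.0f;
cv::gpu::GpuMat d_data(h_data);

cv::Scalar m, sd, mRef, sdRef;
meanStdDev(d_data, m, sd);           // GPU version above
cv::meanStdDev(h_data, mRef, sdRef); // built-in CPU reference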
Example #11
void sg::DoGCenterSurround::operator()( cv::gpu::GpuMat const & input, cv::gpu::GpuMat & output )
{
	if( !itsIsValid )
		return;

	// Zero the output 
	output.setTo( 0.0f );

	for( auto & filter : itsFilters )
	{
		cv::gpu::mulSpectrums( input, filter, itsBuffer, cv::DFT_COMPLEX_OUTPUT );
		addFC2Wrapper( output, itsBuffer, output );
	}

	mulValueFC2Wrapper( output, 1.0f / itsFilters.size(), output ); // average the accumulated filter responses
}
Example #12
    void detectKeypoints(cv::gpu::GpuMat& keypoints, int scales)
    {

        ensureSizeIsEnough(SIFT_GPU::ROWS_COUNT, MAXEXTREMAS, CV_32FC1, keypoints);

        keypoints.setTo(cv::Scalar::all(0));
        for (int octave = 0; octave < 1; ++octave) // NOTE: only the first octave is processed
        {
            const int scaleCols = cols >> octave;
            const int scaleRows = rows >> octave;

            createDoGSpace(inImage.data, &deviceDoGData, scales, scaleRows, scaleCols);
            findExtremas(deviceDoGData, &sift_.extremaBuffer, &maxCounter, octave, scales, scaleRows, scaleCols);
            localization(deviceDoGData, scaleRows, scaleCols, scales, octave, sift_.nOctaves, sift_.extremaBuffer, maxCounter,
                         keypoints.ptr<float>(SIFT_GPU::X_ROW), keypoints.ptr<float>(SIFT_GPU::Y_ROW),
                         keypoints.ptr<float>(SIFT_GPU::OCTAVE_ROW), keypoints.ptr<float>(SIFT_GPU::SIZE_ROW),
                         keypoints.ptr<float>(SIFT_GPU::ANGLE_ROW), keypoints.ptr<float>(SIFT_GPU::RESPONSE_ROW));
        }

        std::cout << "Number of keypoints: " << maxCounter[0] << std::endl;
    }
Example #13
void sg::LogGabor::getEdgeResponses( cv::gpu::GpuMat const & fftImage, cv::gpu::GpuMat & edges,
                                     std::vector<cv::gpu::GpuMat> & splitBuffer, DoGCenterSurround & dog )
{
    if( !itsValid )
    {
        std::cerr << "This LogGabor bank is not initialized!\n";
        return;
    }

    edges.setTo( 0.0f );

    // outer loop for orientations
    for( size_t o = 0; o < itsFilters.size(); ++o )
    {
        // inner loop over scales
        for( size_t s = 0; s < itsGaborScales.size(); ++s )
        {
            // see note in addFilters about why this is a normal multiplication
            // and not a spectrum multiply
            mulFC2Wrapper( fftImage, itsFilters[o][s], itsFFTBuffer[0] );
            cv::gpu::dft( itsFFTBuffer[0], itsFFTBuffer[1], itsFFTBuffer[0].size(), cv::DFT_INVERSE );

            // Get magnitude
            cv::gpu::magnitude( itsFFTBuffer[1], splitBuffer[0] );

            // Compute edge response
            // 	the edge response is a DoG applied to the magnitude
            // 	so we'll pad the magnitude with zero valued complex
            // 	take the DFT and then pass it through the DoG processing chain
            splitBuffer[1].setTo( 0.0f );
            cv::gpu::GpuMat merge[] = { splitBuffer[0], splitBuffer[1] };
            cv::gpu::merge( merge, 2, itsFFTBuffer[0] );
            cv::gpu::dft( itsFFTBuffer[0], itsFFTBuffer[0], itsFFTBuffer[0].size() );
            dog( itsFFTBuffer[0], itsFFTBuffer[2] );

            // accumulate result into edges
            addFC2Wrapper( edges, itsFFTBuffer[2], edges );
        } // end scales
    } // end orientations
}
Example #14
void cv::gpu::MOG_GPU::operator()(const cv::gpu::GpuMat& frame, cv::gpu::GpuMat& fgmask, float learningRate, Stream& stream)
{
    using namespace cv::gpu::cudev::mog;

    CV_Assert(frame.depth() == CV_8U);

    int ch = frame.channels();
    int work_ch = ch;

    if (nframes_ == 0 || learningRate >= 1.0 || frame.size() != frameSize_ || work_ch != mean_.channels())
        initialize(frame.size(), frame.type());

    fgmask.create(frameSize_, CV_8UC1);

    ++nframes_;
    learningRate = learningRate >= 0.0f && nframes_ > 1 ? learningRate : 1.0f / std::min(nframes_, history);
    CV_Assert(learningRate >= 0.0f);

    mog_gpu(frame, ch, fgmask, weight_, sortKey_, mean_, var_, nmixtures_,
            varThreshold, learningRate, backgroundRatio, noiseSigma,
            StreamAccessor::getStream(stream));
}
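A sketch of the usual background-subtraction loop around MOG_GPU (the video path is an assumption); as the code above shows, a negative learning rate falls back to 1/min(nframes, history):
cv::gpu::MOG_GPU mog;
cv::VideoCapture cap("video.avi");
cv::Mat frame;
cv::gpu::GpuMat d_frame, d_fgmask;

while (cap.read(frame))
{
	d_frame.upload(frame);
	mog(d_frame, d_fgmask, 0.01f); // fixed learning rate of 0.01
	// ... consume d_fgmask (CV_8UC1 foreground mask) ...
}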
Example #15
void CFast::downloadKeypoints(const cv::gpu::GpuMat& cvgmKeyPoints_, std::vector<cv::KeyPoint>* pvKeyPoints_)
{
	if (cvgmKeyPoints_.empty())	return;
	cv::Mat cvmKeyPoints(cvgmKeyPoints_); // download from device (cvgm) to host (cvm)
	convertKeypoints(cvmKeyPoints, &*pvKeyPoints_);
}
Example #16
void cv::gpu::GMG_GPU::operator ()(const cv::gpu::GpuMat& frame, cv::gpu::GpuMat& fgmask, float newLearningRate, cv::gpu::Stream& stream)
{
    using namespace cv::gpu::cudev::bgfg_gmg;

    typedef void (*func_t)(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures,
                           int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
    static const func_t funcs[6][4] =
    {
        {update_gpu<uchar>, 0, update_gpu<uchar3>, update_gpu<uchar4>},
        {0,0,0,0},
        {update_gpu<ushort>, 0, update_gpu<ushort3>, update_gpu<ushort4>},
        {0,0,0,0},
        {0,0,0,0},
        {update_gpu<float>, 0, update_gpu<float3>, update_gpu<float4>}
    };

    CV_Assert(frame.depth() == CV_8U || frame.depth() == CV_16U || frame.depth() == CV_32F);
    CV_Assert(frame.channels() == 1 || frame.channels() == 3 || frame.channels() == 4);

    if (newLearningRate != -1.0f)
    {
        CV_Assert(newLearningRate >= 0.0f && newLearningRate <= 1.0f);
        learningRate = newLearningRate;
    }

    if (frame.size() != frameSize_)
        initialize(frame.size(), 0.0f, frame.depth() == CV_8U ? 255.0f : frame.depth() == CV_16U ? std::numeric_limits<ushort>::max() : 1.0f);

    fgmask.create(frameSize_, CV_8UC1);
    fgmask.setTo(cv::Scalar::all(0), stream);

    funcs[frame.depth()][frame.channels() - 1](frame, fgmask, colors_, weights_, nfeatures_, frameNum_, learningRate, updateBackgroundModel, cv::gpu::StreamAccessor::getStream(stream));

    // smoothing: approximate a median blur with a box filter followed by a threshold
    if (smoothingRadius > 0)
    {
        boxFilter_->apply(fgmask, buf_, cv::Rect(0, 0, -1, -1), stream);
        int minCount = (smoothingRadius * smoothingRadius + 1) / 2;
        double thresh = 255.0 * minCount / (smoothingRadius * smoothingRadius);
        cv::gpu::threshold(buf_, fgmask, thresh, 255.0, cv::THRESH_BINARY, stream);
    }

    // keep track of how many frames we have processed
    ++frameNum_;
}
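A corresponding usage sketch for GMG_GPU (the video path is an assumption); passing no learning rate leaves the stored one unchanged, since the code above only updates it when the argument differs from -1:
cv::gpu::GMG_GPU gmg;
cv::VideoCapture cap("video.avi");
cv::Mat frame;
cv::gpu::GpuMat d_frame, d_fgmask;

while (cap.read(frame))
{
	d_frame.upload(frame);
	gmg(d_frame, d_fgmask); // default learning rate of -1 keeps the current setting
	// ... consume d_fgmask ...
}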