/// Apply the DoG center-surround filter bank to a frequency-domain image.
///
/// @param input   Complex (two-channel float) spectrum of the source image.
/// @param output  Receives the average of the per-filter responses; it is
///                zeroed first, so any previous contents are discarded.
///
/// No-op if this instance was never successfully initialized.
void sg::DoGCenterSurround::operator()( cv::gpu::GpuMat const & input, cv::gpu::GpuMat & output )
{
	if( !itsIsValid )
		return;

	// Zero the accumulator before summing per-filter responses
	output.setTo( 0.0f );

	// Guard: with no filters the loop below is a no-op and the final
	// scale would be 1.0f / 0, filling the output with Inf/NaN.
	if( itsFilters.empty() )
		return;

	for( auto & filter : itsFilters )
	{
		// Per-filter spectrum product, accumulated into the output
		cv::gpu::mulSpectrums( input, filter, itsBuffer, cv::DFT_COMPLEX_OUTPUT );
		addFC2Wrapper( output, itsBuffer, output );
	}

	// Normalize the sum to the mean response across all filters
	mulValueFC2Wrapper( output, 1.0f / itsFilters.size(), output );
}
예제 #2
0
파일: gmg.cpp 프로젝트: AaronPlay/opencv
/// Process one frame of the GMG background-subtraction model, producing a
/// foreground mask and updating the per-pixel statistical model.
///
/// @param frame            Input frame: CV_8U, CV_16U, or CV_32F with 1, 3 or 4 channels.
/// @param fgmask           Output CV_8UC1 mask (0 = background, 255 = foreground).
/// @param newLearningRate  If not -1, overrides the stored learning rate; must be in [0, 1].
/// @param stream           CUDA stream for asynchronous execution.
void cv::gpu::GMG_GPU::operator ()(const cv::gpu::GpuMat& frame, cv::gpu::GpuMat& fgmask, float newLearningRate, cv::gpu::Stream& stream)
{
    using namespace cv::gpu::cudev::bgfg_gmg;

    typedef void (*func_t)(PtrStepSzb frame, PtrStepb fgmask, PtrStepSzi colors, PtrStepf weights, PtrStepi nfeatures,
                           int frameNum, float learningRate, bool updateBackgroundModel, cudaStream_t stream);
    // Kernel dispatch table indexed by [depth][channels - 1]; zero entries
    // are unsupported combinations (2-channel, and CV_8S/CV_16S/CV_32S depths),
    // which the asserts below rule out.
    static const func_t funcs[6][4] =
    {
        {update_gpu<uchar>, 0, update_gpu<uchar3>, update_gpu<uchar4>},
        {0,0,0,0},
        {update_gpu<ushort>, 0, update_gpu<ushort3>, update_gpu<ushort4>},
        {0,0,0,0},
        {0,0,0,0},
        {update_gpu<float>, 0, update_gpu<float3>, update_gpu<float4>}
    };

    CV_Assert(frame.depth() == CV_8U || frame.depth() == CV_16U || frame.depth() == CV_32F);
    CV_Assert(frame.channels() == 1 || frame.channels() == 3 || frame.channels() == 4);

    // -1 is the sentinel meaning "keep the current learning rate"
    if (newLearningRate != -1.0f)
    {
        CV_Assert(newLearningRate >= 0.0f && newLearningRate <= 1.0f);
        learningRate = newLearningRate;
    }

    // (Re)initialize the model on the first frame or on a size change, with a
    // value range matching the depth: [0,255] for 8U, [0,65535] for 16U,
    // [0,1] assumed for float input.
    if (frame.size() != frameSize_)
        initialize(frame.size(), 0.0f, frame.depth() == CV_8U ? 255.0f : frame.depth() == CV_16U ? std::numeric_limits<ushort>::max() : 1.0f);

    fgmask.create(frameSize_, CV_8UC1);
    fgmask.setTo(cv::Scalar::all(0), stream);

    // Run the model-update kernel on the caller's stream
    funcs[frame.depth()][frame.channels() - 1](frame, fgmask, colors_, weights_, nfeatures_, frameNum_, learningRate, updateBackgroundModel, cv::gpu::StreamAccessor::getStream(stream));

    // Post-smoothing: approximate a median filter with a box filter + threshold.
    // A pixel stays foreground only if at least half of its neighborhood
    // (minCount of radius^2 pixels) was foreground.
    if (smoothingRadius > 0)
    {
        boxFilter_->apply(fgmask, buf_, stream);
        int minCount = (smoothingRadius * smoothingRadius + 1) / 2;
        double thresh = 255.0 * minCount / (smoothingRadius * smoothingRadius);
        cv::gpu::threshold(buf_, fgmask, thresh, 255.0, cv::THRESH_BINARY, stream);
    }

    // keep track of how many frames we have processed
    ++frameNum_;
}
예제 #3
0
    /// Detect SIFT keypoints on the GPU and store them row-wise in `keypoints`
    /// (one row per attribute: X, Y, octave, size, angle, response).
    ///
    /// @param keypoints  Resized to ROWS_COUNT x MAXEXTREMAS CV_32FC1 and zeroed;
    ///                   filled by the localization kernel.
    /// @param scales     Number of scales per octave in the DoG space.
    void detectKeypoints(cv::gpu::GpuMat& keypoints, int scales)
    {

        ensureSizeIsEnough(SIFT_GPU::ROWS_COUNT, MAXEXTREMAS, CV_32FC1, keypoints);

        keypoints.setTo(cv::Scalar::all(0));
        // NOTE(review): the loop is hard-coded to a single octave even though
        // sift_.nOctaves is passed to localization() below — confirm whether
        // multi-octave processing was intended here.
        for (int octave = 0; octave < 1; ++octave)
        {
            // Each octave halves the working resolution
            const int scaleCols = cols >> octave;
            const int scaleRows = rows >> octave;

            // Build the DoG pyramid, find scale-space extrema, then refine them
            // into keypoint attributes written directly into the output rows.
            createDoGSpace(inImage.data, &deviceDoGData, scales, scaleRows, scaleCols);
            findExtremas(deviceDoGData, &sift_.extremaBuffer, &maxCounter, octave, scales, scaleRows, scaleCols);
            localization(deviceDoGData, scaleRows, scaleCols, scales, octave, sift_.nOctaves, sift_.extremaBuffer, maxCounter,
                         keypoints.ptr<float>(SIFT_GPU::X_ROW), keypoints.ptr<float>(SIFT_GPU::Y_ROW),
                         keypoints.ptr<float>(SIFT_GPU::OCTAVE_ROW),	keypoints.ptr<float>(SIFT_GPU::SIZE_ROW),
                         keypoints.ptr<float>(SIFT_GPU::ANGLE_ROW), keypoints.ptr<float>(SIFT_GPU::RESPONSE_ROW));
        }

        // NOTE(review): debug print left in — presumably maxCounter[0] holds the
        // detected-keypoint count after the kernels run; consider removing for
        // library use.
        std::cout << "Number of keypoints: " << maxCounter[0] << std::endl;
    }
/// Compute edge responses for every orientation/scale pair in the Log-Gabor
/// bank and accumulate them into `edges`.
///
/// @param fftImage     Complex spectrum of the source image.
/// @param edges        Accumulator for the summed DoG edge responses; zeroed first.
/// @param splitBuffer  Scratch single-channel buffers (at least two) for the
///                     magnitude and a zero imaginary plane.
/// @param dog          Center-surround processor applied to each magnitude spectrum.
void sg::LogGabor::getEdgeResponses( cv::gpu::GpuMat const & fftImage, cv::gpu::GpuMat & edges,
                                     std::vector<cv::gpu::GpuMat> & splitBuffer, DoGCenterSurround & dog )
{
    if( !itsValid )
    {
        std::cerr << "This LogGabor bank is not initialized!\n";
        return;
    }

    // Start from a clean accumulator
    edges.setTo( 0.0f );

    size_t const numOrientations = itsFilters.size();
    size_t const numScales       = itsGaborScales.size();

    for( size_t orientation = 0; orientation < numOrientations; ++orientation )
    {
        for( size_t scale = 0; scale < numScales; ++scale )
        {
            // Plain element-wise complex product — not a spectrum multiply;
            // see the note in addFilters for why.
            mulFC2Wrapper( fftImage, itsFilters[orientation][scale], itsFFTBuffer[0] );
            cv::gpu::dft( itsFFTBuffer[0], itsFFTBuffer[1], itsFFTBuffer[0].size(), cv::DFT_INVERSE );

            // Magnitude of the filtered (spatial-domain) response
            cv::gpu::magnitude( itsFFTBuffer[1], splitBuffer[0] );

            // The edge response is a DoG of that magnitude: re-embed the real
            // magnitude as a complex image with a zero imaginary plane, take
            // the forward DFT, and run it through the DoG processing chain.
            splitBuffer[1].setTo( 0.0f );
            cv::gpu::GpuMat planes[] = { splitBuffer[0], splitBuffer[1] };
            cv::gpu::merge( planes, 2, itsFFTBuffer[0] );
            cv::gpu::dft( itsFFTBuffer[0], itsFFTBuffer[0], itsFFTBuffer[0].size() );
            dog( itsFFTBuffer[0], itsFFTBuffer[2] );

            // Fold this orientation/scale response into the running total
            addFC2Wrapper( edges, itsFFTBuffer[2], edges );
        }
    }
}