Example no. 1
void StereoSingleGpu::compute(const Mat& leftFrame, const Mat& rightFrame, Mat& disparity)
{
    cuda::setDevice(deviceId_);
    d_leftFrame.upload(leftFrame);
    d_rightFrame.upload(rightFrame);
    d_alg->compute(d_leftFrame, d_rightFrame, d_disparity);
    d_disparity.download(disparity);
}
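This wrapper pins the calling thread to a specific GPU before touching the per-device buffers, so one instance per device can process frames independently. Below is a minimal sketch of how a caller might drive two such workers; the StereoSingleGpu constructor and the rest of the class are assumptions, since only compute() appears above.

// Hypothetical driver: one StereoSingleGpu instance per CUDA device.
// Assumes StereoSingleGpu takes the device id in its constructor and
// owns the d_* GpuMat members used by compute().
cv::Mat left  = cv::imread("left.png", 0);   // assumed grayscale input pair
cv::Mat right = cv::imread("right.png", 0);
cv::Mat disparity0, disparity1;

StereoSingleGpu gpu0(0), gpu1(1);        // assumed constructor signature
gpu0.compute(left, right, disparity0);   // runs on device 0
gpu1.compute(left, right, disparity1);   // runs on device 1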
Example no. 2
GpuMat* ImageImPro_OpenCvImpl::getGPUMat(){
    Mat* ptrMat = this->getMat();
    GpuMat* ptrGpuMat = new GpuMat();
    ptrGpuMat->upload(*ptrMat);
    delete ptrMat;
    return ptrGpuMat;
}
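The method returns a heap-allocated GpuMat, so ownership passes to the caller. A short sketch of the intended call pattern (the rest of ImageImPro_OpenCvImpl is an assumption):

// Hypothetical caller of the wrapper above.
ImageImPro_OpenCvImpl image;            // assumed to wrap a host image
cv::gpu::GpuMat* ptrGpu = image.getGPUMat();
// ... device-side processing on *ptrGpu ...
delete ptrGpu;                          // the caller must free the returned GpuMat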
Example no. 3
/* Load the mean file in binaryproto format. */
void Classifier::SetMean(const string& mean_file)
{
    BlobProto blob_proto;
    ReadProtoFromBinaryFileOrDie(mean_file.c_str(), &blob_proto);

    /* Convert from BlobProto to Blob<float> */
    Blob<float> mean_blob;
    mean_blob.FromProto(blob_proto);
    CHECK_EQ(mean_blob.channels(), num_channels_)
        << "Number of channels of mean file doesn't match input layer.";

    /* The format of the mean file is planar 32-bit float BGR or grayscale. */
    std::vector<Mat> channels;
    float* data = mean_blob.mutable_cpu_data();
    for (int i = 0; i < num_channels_; ++i)
    {
        /* Extract an individual channel. */
        Mat channel(mean_blob.height(), mean_blob.width(), CV_32FC1, data);
        channels.push_back(channel);
        data += mean_blob.height() * mean_blob.width();
    }

    /* Merge the separate channels into a single image. */
    Mat packed_mean;
    merge(channels, packed_mean);

    /* Compute the global mean pixel value and create a mean image
     * filled with this value. */
    Scalar channel_mean = mean(packed_mean);
    Mat host_mean = Mat(input_geometry_, packed_mean.type(), channel_mean);
    mean_.upload(host_mean);
}
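Once uploaded, mean_ lives on the device and can be subtracted from each preprocessed frame without further transfers. A minimal sketch of that step, assuming the OpenCV 2.x gpu module and a float frame shaped like input_geometry_ (the d_* names are assumptions):

// Hypothetical mean-subtraction step using the uploaded mean image.
cv::gpu::GpuMat d_img;        // assumed CV_32F frame matching input_geometry_
cv::gpu::GpuMat d_normalized;
cv::gpu::subtract(d_img, mean_, d_normalized); // per-pixel mean subtraction on the GPU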
Example no. 4
int main( int argc, char** argv )
{
    const char* filename = argc == 2 ? argv[1] : "baboon.jpg";
    if (argc == 2 && string(argv[1]) == "--help")
    {
        help();
        return -1;
    }

    src.upload(imread(filename, 1));
    if (src.empty())
    {
        help();
        return -1;
    }

    cv::gpu::printShortCudaDeviceInfo(cv::gpu::getDevice());

    help();


    if (src.channels() == 3)
    {
        // the GPU module supports only 4-channel images here
        GpuMat src4ch;
        cv::gpu::cvtColor(src, src4ch, COLOR_BGR2BGRA);
        src = src4ch;
    }

    //create windows for output images
    namedWindow("Open/Close",1);
    namedWindow("Erode/Dilate",1);

    open_close_pos = erode_dilate_pos = max_iters;
    createTrackbar("iterations", "Open/Close",&open_close_pos,max_iters*2+1,OpenClose);
    createTrackbar("iterations", "Erode/Dilate",&erode_dilate_pos,max_iters*2+1,ErodeDilate);

    for(;;)
    {
        int c;

        OpenClose(open_close_pos, 0);
        ErodeDilate(erode_dilate_pos, 0);
        c = waitKey();

        if( (char)c == 27 )
            break;
        if( (char)c == 'e' )
            element_shape = MORPH_ELLIPSE;
        else if( (char)c == 'r' )
            element_shape = MORPH_RECT;
        else if( (char)c == 'c' )
            element_shape = MORPH_CROSS;
        else if( (char)c == ' ' )
            element_shape = (element_shape + 1) % 3;
    }

    return 0;
}
Example no. 5
void cv::gpu::BruteForceMatcher_GPU_base::makeGpuCollection(GpuMat& trainCollection, GpuMat& maskCollection,
    const vector<GpuMat>& masks)
{
    if (empty())
        return;

    if (masks.empty())
    {
        Mat trainCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(DevMem2Db)));

        DevMem2Db* trainCollectionCPU_ptr = trainCollectionCPU.ptr<DevMem2Db>();

        for (size_t i = 0, size = trainDescCollection.size(); i < size; ++i, ++trainCollectionCPU_ptr)
            *trainCollectionCPU_ptr = trainDescCollection[i];

        trainCollection.upload(trainCollectionCPU);
        maskCollection.release();
    }
    else
    {
        CV_Assert(masks.size() == trainDescCollection.size());

        Mat trainCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(DevMem2Db)));
        Mat maskCollectionCPU(1, static_cast<int>(trainDescCollection.size()), CV_8UC(sizeof(PtrStepb)));

        DevMem2Db* trainCollectionCPU_ptr = trainCollectionCPU.ptr<DevMem2Db>();
        PtrStepb* maskCollectionCPU_ptr = maskCollectionCPU.ptr<PtrStepb>();

        for (size_t i = 0, size = trainDescCollection.size(); i < size; ++i, ++trainCollectionCPU_ptr, ++maskCollectionCPU_ptr)
        {
            const GpuMat& train = trainDescCollection[i];
            const GpuMat& mask = masks[i];

            CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.cols == train.rows));

            *trainCollectionCPU_ptr = train;
            *maskCollectionCPU_ptr = mask;
        }

        trainCollection.upload(trainCollectionCPU);
        maskCollection.upload(maskCollectionCPU);
    }
}
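The trick here is that DevMem2Db and PtrStepb are small POD headers (data pointer, step, size), so a host Mat filled with them can be uploaded as a 1×N device array of matrix descriptors that kernels can index. A sketch of the calling side, assuming the OpenCV 2.4 BruteForceMatcher_GPU_base API and that makeGpuCollection is reachable from the caller:

// Hypothetical usage of the collection mechanism (OpenCV 2.4 gpu module).
cv::gpu::BruteForceMatcher_GPU_base matcher;
matcher.add(std::vector<cv::gpu::GpuMat>(1, d_trainDescriptors)); // d_trainDescriptors assumed

cv::gpu::GpuMat trainCollection, maskCollection;
matcher.makeGpuCollection(trainCollection, maskCollection, std::vector<cv::gpu::GpuMat>()); // no masks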
Example no. 6
        LevelsInit()
        {
            nValues3[0] = nValues3[1] = nValues3[2] = 256;
            for (int i = 0; i < 256; ++i)
                pLevels[i] = i;


#if (CUDA_VERSION <= 4020)
            pLevels3[0] = pLevels3[1] = pLevels3[2] = pLevels;
#else
            d_pLevels.upload(Mat(1, 256, CV_32S, pLevels));
            pLevels3[0] = pLevels3[1] = pLevels3[2] = d_pLevels.ptr<Npp32s>();
#endif
        }
Example no. 7
GpuMat cv::superres::arrGetGpuMat(InputArray arr, GpuMat& buf)
{
    switch (arr.kind())
    {
    case _InputArray::GPU_MAT:
        return arr.getGpuMat();

    case _InputArray::OPENGL_BUFFER:
        arr.getOGlBuffer().copyTo(buf);
        return buf;

    default:
        buf.upload(arr.getMat());
        return buf;
    }
}
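This helper lets one function body accept a host Mat, an OpenGL buffer, or a GpuMat, paying for an upload only when the data is not already on the device. A sketch of a call site, assuming the helper is visible to the caller:

// Hypothetical call site: buf is reusable scratch device memory, so repeated
// calls with host input do not reallocate on every frame.
cv::gpu::GpuMat buf;
cv::Mat host = cv::imread("frame.png");
cv::gpu::GpuMat d_frame = cv::superres::arrGetGpuMat(host, buf); // uploads into buf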
Example no. 8
GpuMat cv::cuda::getInputMat(InputArray _src, Stream& stream)
{
    GpuMat src;

#ifndef HAVE_CUDA
    (void) _src;
    (void) stream;
    throw_no_cuda();
#else
    if (_src.kind() == _InputArray::CUDA_GPU_MAT)
    {
        src = _src.getGpuMat();
    }
    else if (!_src.empty())
    {
        BufferPool pool(stream);
        src = pool.getBuffer(_src.size(), _src.type());
        src.upload(_src, stream);
    }
#endif

    return src;
}
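getInputMat is the usual prologue of a CUDA-module function: a GpuMat argument passes through untouched, while host input is staged into a stream-aware BufferPool allocation. A sketch of the wrapper shape typically built on it, assuming its companions getOutputMat and syncOutput from the same private header (the operation itself is a placeholder):

// Hypothetical CUDA-module entry point following the getInputMat pattern.
void myGpuOp(cv::InputArray _src, cv::OutputArray _dst, cv::cuda::Stream& stream)
{
    cv::cuda::GpuMat src = cv::cuda::getInputMat(_src, stream);
    cv::cuda::GpuMat dst = cv::cuda::getOutputMat(_dst, src.rows, src.cols, src.type(), stream);
    // ... enqueue device work from src to dst on `stream` here ...
    cv::cuda::syncOutput(dst, _dst, stream); // downloads only if _dst is host memory
}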
Example no. 9
int main( int argc, const char** argv )
{

	VideoCapture cap;
	Rect trackWindow;

	struct timeval timea, timeb, timeS, timeE;
	long totalTime = 0, matchTime = 0, convertTime = 0, loadTime = 0;
	int nFrames = 0;

	cap.open("/home/ubuntu/Aerial/photos/SoccerGoal2_464.mp4"); //open smaller video file (recommended for Jetson)
//	cap.open("/home/scott/Aerial//aerial_navigation/photos/SoccerGoal2.mp4"); //open regular video file (desktop)

	cerr << cap.get(CV_CAP_PROP_FRAME_WIDTH) << endl;
	cerr << cap.get(CV_CAP_PROP_FRAME_HEIGHT) << endl;
	vector<string> screenshots;
	//smaller training images (Jetson)
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh1_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh2_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh3_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh4_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh5_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh6_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh7_464.png");
	screenshots.push_back("/home/ubuntu/Aerial/WicketTraining/sh8_464.png");
	//regular training images (desktop)
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh1.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh2.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh3.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh4.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh5.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh6.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh7.png");
//	screenshots.push_back("/home/scott/Aerial/aerial_navigation/WicketTraining/sh8.png");


	if( !cap.isOpened() ) //make sure video file could be opened
	{
		cout << "***Could not initialize capturing...***\n";
		return -1;
	}
	//define the shape to be used for erode and dilate. Change size to increase or decrease the amount eroded and dilated
	Mat element = getStructuringElement(element_shape, Size(3, 3), Point(-1, -1) );

	//Initialize the Kalman filter
	KalmanFilter KF(4, 2, 0);
	Mat_<float> measurement(2,1); measurement.setTo(Scalar(0));
	Point pt(0, 0);

	//initialize the display window
	namedWindow( "TrackingWicket", 0 );
	setMouseCallback( "TrackingWicket", onMouse, 0 );

	Rect bb; //rectangle used to mask the image and shrink the template-match search area

	//state variables
	bool paused = false;
	bool debug = true;

	cap >> frame0; //load the first frame
	paused = true; //paused for training
	vector<int> index(8); //indexes of the training images
	Point2f ctr_point, kal_point;

	ctr_point = pt; //point for the measured center of matched image
	kal_point = pt; //point for the corrected Kalman filter estimate

	//Gather training images
	for(int i = 0; i < screenshots.size(); i++){
		sh = imread(screenshots[i]); //read in trained image file

		for(;;){

			gpu_frame0.upload(sh); //upload image to gpu memory
			proccess_frame(element, thresh); //process the frame prior to selection

			gpu_gray.download(image); //download processed image so it can be displayed
			if(trackObject < 0) { //part of image has been selected so get the trained image

				mask_coll[i] = GpuMat(gpu_gray.size(), CV_8UC1, Scalar::all(0)); //initialize a mask
				mask_coll[i](selection).setTo(Scalar::all(255)); //set the mask to be the selected area
				gpu::bitwise_and(gpu_gray, mask_coll[i], train_coll[i]); //set the image to be only the parts in the mask
				train_coll[i] = train_coll[i](selection); //set the trained image to be just the size of the selection. I'm not sure that this process is the best way
				selections[i] = selection; //save the selection value for later use
				index[i] = i; // save the index value
				trackObject = 0; //set track object to 0 so we don't repeat this process until we have selected an object
				selectObject = 0; //reset the selection object state to no object
				break;
			}
			if( selectObject && selection.width > 0 && selection.height > 0 ) //if selecting an object show the area being selected
			{
				Mat mask(image, selection);
				bitwise_not(mask, mask);
			}
			imshow("TrackingWicket", image); //display the image
			waitKey(10);
		}
		break;
	}
	//loop over frames in video feed (breaks at end of file)
	for(;;)
	{
		gettimeofday(&timea, NULL); //start overall timer
		if( !paused )
		{
			gettimeofday(&timeS, NULL); //start image load timer
			cap >> frame0; //load next frame
			gettimeofday(&timeE, NULL); //end image load timer
			loadTime += getTimeDelta(timeS, timeE); //add to the load time
			nFrames++; //increment the frames processed count
			if( frame0.empty() ) //make sure we have a frame stored
				break;
		}
		if( !paused ) //skip if paused
		{
			if(trackObject < 0) { //if this is the first pass through tracking, do some initialization
				//set point p to be the center of the selected area
				Point p = Point(selection.tl().x + (selection.width / 2), selection.tl().y + (selection.height / 2));
				bb = selection; //bounding box for the search area is the selection

				kalman_init(KF, p, 1e-4, 1e-4, .1); //initialize kalman filter.

				ctr_point = pt;
				kal_point = pt;

				trackObject = 1; //set this so we don't come through here again
			}
			if(trackObject){

				Mat prediction = KF.predict(); //predict where the center of the match will be
				Point predictPt(prediction.at<float>(0),prediction.at<float>(1)); //get the point
				bool smallwindow = false;
				if(predictPt.x != 0 || predictPt.y != 0){ //if the predicted point isn't the initial (0,0) point, use it to position the search box
					smallwindow = true;
					selection.x = predictPt.x - (selection.width/2);
					selection.y = predictPt.y - (selection.height/2);
				}

				gettimeofday(&timeS, NULL); //start convert timer
				gpu_frame0.upload(frame0); //upload frame to gpu memory
				proccess_frame(element, thresh); //process the frame

				gettimeofday(&timeE, NULL); //stop convert timer
				convertTime += getTimeDelta(timeS, timeE);
				//gpu_gray.download(gray);

				double best_max_value = 0;
				Point best_location;
				Rect predictRect;
				int idx = 0;
				gettimeofday(&timeS, NULL); //start match template timer
				if(smallwindow){ //if we are using a small window to search for the template

					//expand the search box to three times the size of the train image/selection
					int wt = 1.5*selection.width;
					int ht = 1.5*selection.height;
					//Rect takes (x, y, width, height), so build it from the top-left corner and the full extents
					predictRect = Rect(predictPt.x - wt, predictPt.y - ht, 2 * wt, 2 * ht);
					predictRect &= Rect(0, 0, gpu_gray.cols, gpu_gray.rows); //clamp the search area to the image bounds

					GpuMat roi(gpu_gray, predictRect); //get area of image we want to search

					match_template(roi, train_coll, index, best_max_value, best_location, idx); //run template match

				} else //search the whole image (slow)
					match_template(gpu_gray, train_coll, index, best_max_value, best_location, idx); //run template match

				gettimeofday(&timeE, NULL); //end template match timer
				matchTime += getTimeDelta(timeS, timeE);

				if (best_max_value > .8){ //if the match score exceeds .8, update the found location; otherwise we didn't find a good enough match (this threshold is not tuned and can be changed)
					if(smallwindow){
						best_location.x = best_location.x + predictRect.tl().x;
						best_location.y = best_location.y + predictRect.tl().y;
						bb = Rect(best_location.x,best_location.y, selections[index[idx]].width, selections[index[idx]].height);//box is now the size of the matched image and the location of the best fit
						box_update(KF, bb, measurement, ctr_point, kal_point); //update the current location of the image and bounding box
					} else {
						bb = Rect(best_location.x,best_location.y, selections[index[idx]].width, selections[index[idx]].height);
						box_update(KF, bb, measurement, ctr_point, kal_point);
					}
				} else //the object wasn't in our window so search the whole image to find it.
					smallwindow = false;

			}
		}


		if( trackObject < 0 ) {
			paused = false;
		}

		gettimeofday(&timeb, NULL); //stop total timer
		totalTime += getTimeDelta(timea, timeb);
		if(debug){ //if debugging, display the image with rectangles showing where the Kalman filter (red) thinks the target is and where the matched (yellow) spot is
			frame0.copyTo(image);
			rectangle(image, selection, Scalar(0, 0, 255), 1, 8, 0);
			rectangle(image, bb, Scalar(0, 255, 255), 1, 8, 0);
			circle( image, kal_point, 4, Scalar(0, 0, 255), -1, 8, 0 );
			circle( image, ctr_point, 4, Scalar(0, 255, 255), -1, 8, 0 );
			imshow( "TrackingWicket", image );
		}

		char c = (char)waitKey(10);
		if( c == 27 )
			break;
		switch(c)
		{
		case 'c':
			trackObject = 0;
			break;
		case 'd':
			debug = !debug;
			break;
		case 'p':
			paused = !paused;
			cout << "frames                       : " << nFrames << endl;
			cout << "TotalTime                    : " << double(totalTime)/1000000.0 << endl;
			cout << "FPS                          : " << double(nFrames)/(double(totalTime)/1000000.0) << endl;
			cout << "Percentage Convert Time      : " << double(convertTime)/double(totalTime) << endl;
			cout << "Percentage Match Time        : " << double(matchTime)/double(totalTime) << endl;
			cout << "Percentage Load Time         : " << double(loadTime)/double(totalTime) << endl;
			totalTime = matchTime = convertTime = loadTime = 0;
			nFrames = 0;
			break;
		default:
			;
		}
	}

	return 0;
}
Example no. 10
void cv::gpu::GoodFeaturesToTrackDetector_GPU::operator ()(const GpuMat& image, GpuMat& corners, const GpuMat& mask)
{
    using namespace cv::gpu::device::gfft;

    CV_Assert(qualityLevel > 0 && minDistance >= 0 && maxCorners >= 0);
    CV_Assert(mask.empty() || (mask.type() == CV_8UC1 && mask.size() == image.size()));

    ensureSizeIsEnough(image.size(), CV_32F, eig_);

    if (useHarrisDetector)
        cornerHarris(image, eig_, Dx_, Dy_, buf_, blockSize, 3, harrisK);
    else
        cornerMinEigenVal(image, eig_, Dx_, Dy_, buf_, blockSize, 3);

    double maxVal = 0;
    minMax(eig_, 0, &maxVal, GpuMat(), minMaxbuf_);

    ensureSizeIsEnough(1, std::max(1000, static_cast<int>(image.size().area() * 0.05)), CV_32FC2, tmpCorners_);

    int total = findCorners_gpu(eig_, static_cast<float>(maxVal * qualityLevel), mask, tmpCorners_.ptr<float2>(), tmpCorners_.cols);

    if (total == 0)
    {
        corners.release();
        return;
    }

    sortCorners_gpu(eig_, tmpCorners_.ptr<float2>(), total);

    if (minDistance < 1)
        tmpCorners_.colRange(0, maxCorners > 0 ? std::min(maxCorners, total) : total).copyTo(corners);
    else
    {
        vector<Point2f> tmp(total);
        Mat tmpMat(1, total, CV_32FC2, (void*)&tmp[0]);
        tmpCorners_.colRange(0, total).download(tmpMat);

        vector<Point2f> tmp2;
        tmp2.reserve(total);

        const int cell_size = cvRound(minDistance);
        const int grid_width = (image.cols + cell_size - 1) / cell_size;
        const int grid_height = (image.rows + cell_size - 1) / cell_size;

        std::vector< std::vector<Point2f> > grid(grid_width * grid_height);

        for (int i = 0; i < total; ++i)
        {
            Point2f p = tmp[i];

            bool good = true;

            int x_cell = static_cast<int>(p.x / cell_size);
            int y_cell = static_cast<int>(p.y / cell_size);

            int x1 = x_cell - 1;
            int y1 = y_cell - 1;
            int x2 = x_cell + 1;
            int y2 = y_cell + 1;

            // boundary check
            x1 = std::max(0, x1);
            y1 = std::max(0, y1);
            x2 = std::min(grid_width - 1, x2);
            y2 = std::min(grid_height - 1, y2);

            for (int yy = y1; yy <= y2; yy++)
            {
                for (int xx = x1; xx <= x2; xx++)
                {
                    vector<Point2f>& m = grid[yy * grid_width + xx];

                    if (!m.empty())
                    {
                        for(size_t j = 0; j < m.size(); j++)
                        {
                            float dx = p.x - m[j].x;
                            float dy = p.y - m[j].y;

                            if (dx * dx + dy * dy < minDistance * minDistance)
                            {
                                good = false;
                                goto break_out;
                            }
                        }
                    }
                }
            }

break_out:

            if(good)
            {
                grid[y_cell * grid_width + x_cell].push_back(p);

                tmp2.push_back(p);

                if (maxCorners > 0 && tmp2.size() == static_cast<size_t>(maxCorners))
                    break;
            }
        }

        corners.upload(Mat(1, static_cast<int>(tmp2.size()), CV_32FC2, &tmp2[0]));
    }
}
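A sketch of driving this detector (OpenCV 2.4 gpu module; the parameter values are arbitrary):

// Hypothetical usage of the GPU good-features detector.
cv::gpu::GoodFeaturesToTrackDetector_GPU detector(1000 /*maxCorners*/, 0.01 /*qualityLevel*/, 10.0 /*minDistance*/);
cv::gpu::GpuMat d_img;     // assumed CV_8UC1 image already on the device
cv::gpu::GpuMat d_corners; // receives a 1xN CV_32FC2 row of corner positions
detector(d_img, d_corners);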
Example no. 11
Mat visionUtils::cannySegmentation(Mat img0, int minPixelSize, bool displayFaces)
{
    // Segments items in a gray image (img0)
    // minPixelSize:
    //   -1 -> return the largest region only
    //    0 -> return all detected segments
    //   >0 -> remove regions with fewer than minPixelSize pixels


    // LB: Zero pad image to remove edge effects when getting regions....
    int padPixels=20;
    // Rect border added at start...
    Rect tempRect;
    tempRect.x=padPixels;
    tempRect.y=padPixels;
    tempRect.width=img0.cols;
    tempRect.height=img0.rows;

    Mat img1 = Mat::zeros(img0.rows+(padPixels*2), img0.cols+(padPixels*2), CV_8UC1);
    img0.copyTo(img1(tempRect));


    if (useGPU)// converted to GPU -> NOT tested to speed up here!
    {
        GpuMat imgGPU;
        imgGPU.upload(img1);
#if CV_MAJOR_VERSION == 2
        gpu::Canny(imgGPU, imgGPU, 100, 200, 3);
#elif CV_MAJOR_VERSION == 3
        cv::Ptr<cv::cuda::CannyEdgeDetector> canny = cv::cuda::createCannyEdgeDetector(100, 200, 3);
        canny->detect(imgGPU, imgGPU);
#endif
        imgGPU.download(img1);
    }
    else
    {
        Canny(img1, img1, 100, 200, 3);
    }


    // find the contours
    vector< vector<Point> > contours;
    findContours(img1, contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);

    // Mask for segmented regions
    Mat mask = Mat::zeros(img1.rows, img1.cols, CV_8UC1);

    vector<double> areas(contours.size());

    if (minPixelSize==-1)
    {   // Case of taking largest region
        for(int i = 0; i < (int)contours.size(); i++)
            areas[i] = contourArea(Mat(contours[i]));
        double max;
        Point maxPosition;
        cv::minMaxLoc(Mat(areas),0,&max,0,&maxPosition);
        drawContours(mask, contours, maxPosition.y, Scalar(1), CV_FILLED);
    }
    else
    {   // Case for using minimum pixel size
        for (int i = 0; i < (int)contours.size(); i++)
        {
            if (contourArea(Mat(contours[i]))>minPixelSize)
                drawContours(mask, contours, i, Scalar(1), CV_FILLED);
        }
    }
    // normalize so imwrite(...)/imshow(...) shows the mask correctly!
    cv::normalize(mask.clone(), mask, 0.0, 255.0, CV_MINMAX, CV_8UC1);

    Mat returnMask;
    returnMask=mask(tempRect);

    // show the images
    if (displayFaces)   imshow("Canny: Img in", img0);
    if (displayFaces)   imshow("Canny: Mask", returnMask);
    if (displayFaces)   imshow("Canny: Output", img1);

    return returnMask;
}
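A sketch of a typical call, following the contract described in the comment block at the top of the function (the visionUtils instance and input path are assumptions):

// Hypothetical call: keep only the largest Canny-bounded region.
visionUtils utils;
cv::Mat gray = cv::imread("input.png", 0); // grayscale input
cv::Mat largestRegion = utils.cannySegmentation(gray, -1 /*largest region only*/, false);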
Example no. 12
Mat visionUtils::skinDetect(Mat captureframe, Mat3b *skinDetectHSV, Mat *skinMask, std::vector<int> adaptiveHSV, int minPixelSize, int imgBlurPixels, int imgMorphPixels, int singleRegionChoice, bool displayFaces)
{

    if (adaptiveHSV.size() != 6) // wrong-sized input -> fall back to the default HSV bounds
    {
        adaptiveHSV.clear();
        adaptiveHSV.push_back(5);
        adaptiveHSV.push_back(38);
        adaptiveHSV.push_back(51);
        adaptiveHSV.push_back(17);
        adaptiveHSV.push_back(250);
        adaptiveHSV.push_back(242);
    }


    //int step = 0;
    Mat3b frameTemp;
    Mat3b frame;
    // Force a resize to 640x480 -> all thresholds / pixel filters are configured for this size
    // The image is returned to its original size at the end
    Size s = captureframe.size();
    cv::resize(captureframe,captureframe,Size(640,480));



    if (useGPU)
    {
        GpuMat imgGPU, imgGPUHSV;
        imgGPU.upload(captureframe);
        // use the GPU variants: the host cvtColor/GaussianBlur do not accept GpuMat
#if CV_MAJOR_VERSION == 2
        gpu::cvtColor(imgGPU, imgGPUHSV, CV_BGR2HSV);
        gpu::GaussianBlur(imgGPUHSV, imgGPUHSV, Size(imgBlurPixels,imgBlurPixels), 1, 1);
#elif CV_MAJOR_VERSION == 3
        cv::cuda::cvtColor(imgGPU, imgGPUHSV, CV_BGR2HSV);
        cv::Ptr<cv::cuda::Filter> blurFilter = cv::cuda::createGaussianFilter(imgGPUHSV.type(), imgGPUHSV.type(), Size(imgBlurPixels,imgBlurPixels), 1, 1);
        blurFilter->apply(imgGPUHSV, imgGPUHSV);
#endif
        imgGPUHSV.download(frameTemp);
    }
    else
    {
        cv::cvtColor(captureframe, frameTemp, CV_BGR2HSV);
        GaussianBlur(frameTemp, frameTemp, Size(imgBlurPixels,imgBlurPixels), 1, 1);
    }

    // Faster thresholding using inRange
    Mat frameThreshold = Mat::zeros(frameTemp.rows,frameTemp.cols, CV_8UC1);
    Scalar hsvMin(adaptiveHSV[0], adaptiveHSV[1], adaptiveHSV[2]);
    Scalar hsvMax(adaptiveHSV[3], adaptiveHSV[4], adaptiveHSV[5]);
    inRange(frameTemp, hsvMin, hsvMax, frameThreshold);
    frameTemp.copyTo(frame,frameThreshold);

    /* BGR CONVERSION AND THRESHOLD */
    Mat1b frame_gray;

    // send HSV to skinDetectHSV for return
    *skinDetectHSV=frame.clone();

    cv::cvtColor(frame, frame_gray, CV_BGR2GRAY);


    // Adaptive thresholding technique
    // 1. Threshold data to find main areas of skin
    adaptiveThreshold(frame_gray,frame_gray,255,ADAPTIVE_THRESH_GAUSSIAN_C,THRESH_BINARY_INV,9,1);


    if (useGPU)
    {
        GpuMat imgGPU;
        imgGPU.upload(frame_gray);
        // 2. Fill in thresholded areas
#if CV_MAJOR_VERSION == 2
        gpu::morphologyEx(imgGPU, imgGPU, CV_MOP_CLOSE, Mat1b(imgMorphPixels,imgMorphPixels,1), Point(-1, -1), 2);
        gpu::GaussianBlur(imgGPU, imgGPU, Size(imgBlurPixels,imgBlurPixels), 1, 1);
#elif CV_MAJOR_VERSION == 3
        //TODO: Check if that's correct
        Mat element = getStructuringElement(MORPH_RECT, Size(imgMorphPixels, imgMorphPixels), Point(-1, -1));
        Ptr<cuda::Filter> closeFilter = cuda::createMorphologyFilter(MORPH_CLOSE, imgGPU.type(), element, Point(-1, -1), 2);
        closeFilter->apply(imgGPU, imgGPU);
        cv::Ptr<cv::cuda::Filter> gaussianFilter = cv::cuda::createGaussianFilter(imgGPU.type(), imgGPU.type(), Size(imgBlurPixels, imgBlurPixels), 1, 1); // blur size matches the CV2 branch above
        gaussianFilter->apply(imgGPU, imgGPU);
#endif

        imgGPU.download(frame_gray);
    }
    else
    {
        // 2. Fill in thresholded areas
        morphologyEx(frame_gray, frame_gray, CV_MOP_CLOSE, Mat1b(imgMorphPixels,imgMorphPixels,1), Point(-1, -1), 2);
        GaussianBlur(frame_gray, frame_gray, Size(imgBlurPixels,imgBlurPixels), 1, 1);
    }

    // Select the single largest region from the image if singleRegionChoice is set (1)
    if (singleRegionChoice)
    {
        *skinMask = cannySegmentation(frame_gray, -1, displayFaces);
    }
    else // Detect each separate block and remove blobs smaller than a few pixels
    {
        *skinMask = cannySegmentation(frame_gray, minPixelSize, displayFaces);
    }

    // Just return skin
    Mat frame_skin;
    captureframe.copyTo(frame_skin,*skinMask);  // Copy captureframe data to frame_skin using the skin mask
    // Resize image to original before return
    cv::resize(frame_skin,frame_skin,s);

    if (displayFaces)
    {
        imshow("Skin HSV (B)",frame);
        imshow("Adaptive_threshold (D1)",frame_gray);
        imshow("Skin segmented",frame_skin);
        waitKey(1); // let the debug windows render before returning
    }

    return frame_skin;
}
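A sketch of a typical call; a wrong-sized adaptiveHSV vector makes the function fall back to its built-in defaults, so an empty vector is a convenient way to request them (paths and tuning values are assumptions):

// Hypothetical call returning the skin-only image.
visionUtils utils;
cv::Mat frame = cv::imread("person.png");
cv::Mat3b hsvOut;
cv::Mat mask;
cv::Mat skin = utils.skinDetect(frame, &hsvOut, &mask,
                                std::vector<int>() /*use default HSV bounds*/,
                                5000 /*minPixelSize*/, 9 /*imgBlurPixels*/,
                                9 /*imgMorphPixels*/, 0 /*all regions*/,
                                false /*no display*/);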
Example no. 13
inline
void Stream::enqueueUpload(InputArray src, GpuMat& dst)
{
    dst.upload(src, *this);
}
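enqueueUpload is thin sugar over the stream-aware GpuMat::upload overload; the copy can overlap with host work when the source is page-locked memory. A minimal sketch using the OpenCV 3.x names:

// Minimal sketch of an asynchronous upload through a stream.
cv::cuda::Stream stream;
cv::cuda::HostMem pinned(480, 640, CV_8UC1); // page-locked host buffer
cv::cuda::GpuMat d_img;
d_img.upload(pinned, stream);     // what enqueueUpload forwards to
stream.waitForCompletion();       // block until the copy has finished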
Example no. 14
void cv::gpu::evenLevels(GpuMat& levels, int nLevels, int lowerLevel, int upperLevel)
{
    Mat host_levels(1, nLevels, CV_32SC1);
    nppSafeCall( nppiEvenLevelsHost_32s(host_levels.ptr<Npp32s>(), nLevels, lowerLevel, upperLevel) );
    levels.upload(host_levels);
}
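A sketch pairing evenLevels with the GPU range histogram that consumes it (OpenCV 2.4 gpu module; the bin count is arbitrary):

// Hypothetical usage: 32 evenly spaced levels over [0, 256), then a range histogram.
cv::gpu::GpuMat levels, hist;
cv::gpu::evenLevels(levels, 32, 0, 256);
cv::gpu::GpuMat d_img; // assumed CV_8UC1 image already on the device
cv::gpu::histRange(d_img, hist, levels);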